commit 5d9b9a6d9f79784916e227708f9affa8524480d3
Author: zty8080123 <1136120063@qq.com>
Date:   Wed Aug 7 09:32:38 2024 +0800

    Add comments

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..702ce01
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,35 @@
+# .gitignore
+# First, ignore everything
+*
+# but do not ignore directories
+!*/
+# ignore these specific directories
+ut/
+runs/
+.vscode/
+build/
+result1/
+result/
+mytest/
+mytest_double/
+pretrained_model/
+gangao/
+extra/
+ccpd/
+*.pyc
+# do not ignore the file types listed below
+!*.cpp
+!*.h
+!*.hpp
+!*.c
+!.gitignore
+!*.py
+!*.sh
+!*.npy
+!*.jpg
+!*.pt
+!*.npy
+!*.pth
+!*.png
+!*.yaml
+!*.md
\ No newline at end of file
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..389da50
--- /dev/null
+++ b/README.md
@@ -0,0 +1,93 @@
+## What's New
+
+**2022.12.04 For detecting vehicles and plates together, see the [vehicle system](https://github.com/we0091234/Car_recognition)**
+
+[yolov8 plate detection + recognition](https://github.com/we0091234/yolov8-plate)
+
+[yolov7 plate detection + recognition](https://github.com/we0091234/yolov7_plate)
+
+[Android NCNN](https://github.com/Ayers-github/Chinese-License-Plate-Recognition)
+
+## Contact
+The models are trained on public datasets. For higher-accuracy models or business cooperation, please add
+
+**wechat: we0091234 (please state your purpose)**
+
+## **A comprehensive license plate recognizer supporting 12 Chinese plate types**
+
+**Requirements: python >= 3.6, pytorch >= 1.7**
+
+#### **Image demo:**
+
+Run detect_plate.py directly, or use the command line:
+
+```
+python detect_plate.py --detect_model weights/plate_detect.pt --rec_model weights/plate_rec_color.pth --image_path imgs --output result
+```
+
+Images in the imgs folder are processed and the results are saved in the result folder.
+
+#### Video demo [2.MP4](https://pan.baidu.com/s/1O1sT8hCEwJZmVScDwBHgOg) extraction code: 41aq
+
+```
+python detect_plate.py --detect_model weights/plate_detect.pt --rec_model weights/plate_rec_color.pth --video 2.mp4
+```
+
+The input video is 2.mp4; the annotated result is saved as result.mp4.
+
+## **Plate detection training**
+
+Plate detection training guide:
+
+[Plate detection training](https://github.com/we0091234/Chinese_license_plate_detection_recognition/tree/main/readme)
+
+## **Plate recognition training**
+
+Plate recognition training guide:
+
+[Plate recognition training](https://github.com/we0091234/crnn_plate_recognition)
+
+#### **Supported plate types:**
+
+- [X] 1. Single-row blue plates
+- [X] 2. Single-row yellow plates
+- [X] 3. New-energy plates
+- [X] 4. White police plates
+- [X] 5. Driving-school (coach) plates
+- [X] 6. Armed-police plates
+- [X] 7. Double-row yellow plates
+- [X] 8. Double-row white plates
+- [X] 9. Embassy plates
+- [X] 10. Hong Kong/Macau (Guangdong Z) plates
+- [X] 11. Double-row green plates
+- [X] 12. Civil-aviation plates
+
+![Image ](image/README/test_1.jpg)
+
+## Deployment
+
+1.[Android NCNN](https://github.com/Ayers-github/Chinese-License-Plate-Recognition)
+
+2.**onnx demo** Baidu Netdisk: [k874](https://pan.baidu.com/s/1K3L3xubd6pXIreAydvUm4g)
+
+```
+python onnx_infer.py --detect_model weights/plate_detect.onnx --rec_model weights/plate_rec_color.onnx --image_path imgs --output result_onnx
+```
+
+3.**tensorrt** deployment: see [tensorrt_plate](https://github.com/we0091234/chinese_plate_tensorrt)
+
+4.**openvino demo** (version 2022.2)
+
+```
+ python openvino_infer.py --detect_model weights/plate_detect.onnx --rec_model weights/plate_rec.onnx --image_path imgs --output result_openvino
+```
+
+## References
+
+* [https://github.com/deepcam-cn/yolov5-face](https://github.com/deepcam-cn/yolov5-face)
+* [https://github.com/Sierkinhane/CRNN_Chinese_Characters_Rec](https://github.com/Sierkinhane/CRNN_Chinese_Characters_Rec)
+
+## More
+**QQ groups: 871797331 (full), 837982567 (second group) for questions**
+
+![Image ](image/README/105384078.png)
diff --git a/ccpd_process.py b/ccpd_process.py
new file mode 100644
index 0000000..cf7bfdf
--- /dev/null
+++ b/ccpd_process.py
@@ -0,0 +1,196 @@
+import os
+import shutil
+import cv2
+import numpy as np
+def allFilePath(rootPath,allFIleList):
+    fileList = os.listdir(rootPath)
+    for temp in fileList:
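+        # walk rootPath recursively, collecting every .jpg path into allFIleList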
if os.path.isfile(os.path.join(rootPath,temp)): + if temp.endswith(".jpg"): + allFIleList.append(os.path.join(rootPath,temp)) + else: + allFilePath(os.path.join(rootPath,temp),allFIleList) + +def order_points(pts): + # initialzie a list of coordinates that will be ordered + # such that the first entry in the list is the top-left, + # the second entry is the top-right, the third is the + # bottom-right, and the fourth is the bottom-left + pts=pts[:4,:] + rect = np.zeros((5, 2), dtype = "float32") + + # the top-left point will have the smallest sum, whereas + # the bottom-right point will have the largest sum + s = pts.sum(axis = 1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + + # now, compute the difference between the points, the + # top-right point will have the smallest difference, + # whereas the bottom-left will have the largest difference + diff = np.diff(pts, axis = 1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + + # return the ordered coordinates + return rect + +def get_partical_ccpd(): + ccpd_dir = r"/mnt/Gpan/BaiduNetdiskDownload/CCPD1/CCPD2020/ccpd_green" + save_Path = r"ccpd/green_plate" + folder_list = os.listdir(ccpd_dir) + for folder_name in folder_list: + count=0 + folder_path = os.path.join(ccpd_dir,folder_name) + if os.path.isfile(folder_path): + continue + if folder_name == "ccpd_fn": + continue + name_list = os.listdir(folder_path) + + save_folder=save_Path + if not os.path.exists(save_folder): + os.mkdir(save_folder) + + for name in name_list: + file_path = os.path.join(folder_path,name) + count+=1 + if count>1000: + break + new_file_path =os.path.join(save_folder,name) + shutil.move(file_path,new_file_path) + print(count,new_file_path) + +def get_rect_and_landmarks(img_path): + file_name = img_path.split("/")[-1].split("-") + landmarks_np =np.zeros((5,2)) + rect = file_name[2].split("_") + landmarks=file_name[3].split("_") + rect_str = "&".join(rect) + landmarks_str= "&".join(landmarks) + rect= rect_str.split("&") + landmarks=landmarks_str.split("&") + rect=[int(x) for x in rect] + landmarks=[int(x) for x in landmarks] + for i in range(4): + landmarks_np[i][0]=landmarks[2*i] + landmarks_np[i][1]=landmarks[2*i+1] +# middle_landmark_w =int((landmarks[4]+landmarks[6])/2) +# middle_landmark_h =int((landmarks[5]+landmarks[7])/2) +# landmarks.append(middle_landmark_w) +# landmarks.append(middle_landmark_h) + landmarks_np_new=order_points(landmarks_np) +# landmarks_np_new[4]=np.array([middle_landmark_w,middle_landmark_h]) + return rect,landmarks,landmarks_np_new + +def x1x2y1y2_yolo(rect,landmarks,img): + h,w,c =img.shape + rect[0] = max(0, rect[0]) + rect[1] = max(0, rect[1]) + rect[2] = min(w - 1, rect[2]-rect[0]) + rect[3] = min(h - 1, rect[3]-rect[1]) + annotation = np.zeros((1, 14)) + annotation[0, 0] = (rect[0] + rect[2] / 2) / w # cx + annotation[0, 1] = (rect[1] + rect[3] / 2) / h # cy + annotation[0, 2] = rect[2] / w # w + annotation[0, 3] = rect[3] / h # h + + annotation[0, 4] = landmarks[0] / w # l0_x + annotation[0, 5] = landmarks[1] / h # l0_y + annotation[0, 6] = landmarks[2] / w # l1_x + annotation[0, 7] = landmarks[3] / h # l1_y + annotation[0, 8] = landmarks[4] / w # l2_x + annotation[0, 9] = landmarks[5] / h # l2_y + annotation[0, 10] = landmarks[6] / w # l3_x + annotation[0, 11] = landmarks[7] / h # l3_y + # annotation[0, 12] = landmarks[8] / w # l4_x + # annotation[0, 13] = landmarks[9] / h # l4_y + return annotation + +def xywh2yolo(rect,landmarks_sort,img): + h,w,c =img.shape + rect[0] = max(0, rect[0]) + rect[1] 
= max(0, rect[1]) + rect[2] = min(w - 1, rect[2]-rect[0]) + rect[3] = min(h - 1, rect[3]-rect[1]) + annotation = np.zeros((1, 12)) + annotation[0, 0] = (rect[0] + rect[2] / 2) / w # cx + annotation[0, 1] = (rect[1] + rect[3] / 2) / h # cy + annotation[0, 2] = rect[2] / w # w + annotation[0, 3] = rect[3] / h # h + + annotation[0, 4] = landmarks_sort[0][0] / w # l0_x + annotation[0, 5] = landmarks_sort[0][1] / h # l0_y + annotation[0, 6] = landmarks_sort[1][0] / w # l1_x + annotation[0, 7] = landmarks_sort[1][1] / h # l1_y + annotation[0, 8] = landmarks_sort[2][0] / w # l2_x + annotation[0, 9] = landmarks_sort[2][1] / h # l2_y + annotation[0, 10] = landmarks_sort[3][0] / w # l3_x + annotation[0, 11] = landmarks_sort[3][1] / h # l3_y + # annotation[0, 12] = landmarks_sort[4][0] / w # l4_x + # annotation[0, 13] = landmarks_sort[4][1] / h # l4_y + return annotation + +def yolo2x1y1x2y2(annotation,img): + h,w,c = img.shape + rect= annotation[:,0:4].squeeze().tolist() + landmarks=annotation[:,4:].squeeze().tolist() + rect_w = w*rect[2] + rect_h =h*rect[3] + rect_x =int(rect[0]*w-rect_w/2) + rect_y = int(rect[1]*h-rect_h/2) + new_rect=[rect_x,rect_y,rect_x+rect_w,rect_y+rect_h] + for i in range(5): + landmarks[2*i]=landmarks[2*i]*w + landmarks[2*i+1]=landmarks[2*i+1]*h + return new_rect,landmarks + +def write_lable(file_path): + pass + + +if __name__ == '__main__': + file_root = r"ccpd" + file_list=[] + count=0 + allFilePath(file_root,file_list) + for img_path in file_list: + count+=1 + # img_path = r"ccpd_yolo_test/02-90_85-173&466_452&541-452&553_176&556_178&463_454&460-0_0_6_26_15_26_32-68-53.jpg" + text_path= img_path.replace(".jpg",".txt") + img =cv2.imread(img_path) + rect,landmarks,landmarks_sort=get_rect_and_landmarks(img_path) + # annotation=x1x2y1y2_yolo(rect,landmarks,img) + annotation=xywh2yolo(rect,landmarks_sort,img) + str_label = "0 " + for i in range(len(annotation[0])): + str_label = str_label + " " + str(annotation[0][i]) + str_label = str_label.replace('[', '').replace(']', '') + str_label = str_label.replace(',', '') + '\n' + with open(text_path,"w") as f: + f.write(str_label) + print(count,img_path) + # get_partical_ccpd() + # file_root = r"ccpd/green_plate" + # file_list=[] + # allFilePath(file_root,file_list) + # count=0 + # for img_path in file_list: + # img_name = img_path.split(os.sep)[-1] + # if not "&" in img_name: + # count+=1 + # os.remove(img_path) + # print(count,img_path) + + # new_rect,new_landmarks=yolo2x1y1x2y2(annotation,img) + # rect= [int(x) for x in new_rect] + # cv2.rectangle(img,(rect[0],rect[1]),(rect[2],rect[3]),(255,0,0),2) + # colors=[(0,255,0),(0,255,255),(255,255,0),(255,255,255),(255,0,255)] #绿 黄 青 白 + # for i in range(5): + # cv2.circle(img,(landmarks[2*i],landmarks[2*i+1]),2,colors[i],2) + # cv2.imwrite("1.jpg",img) + # print(rect,landmarks) + # get_partical_ccpd() + + + \ No newline at end of file diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml new file mode 100644 index 0000000..df7a936 --- /dev/null +++ b/data/argoverse_hd.yaml @@ -0,0 +1,21 @@ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_argoverse_hd.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: 
../argoverse/Argoverse-1.1/images/train/ # 39384 images +val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges +test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview + +# number of classes +nc: 8 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/coco.yaml b/data/coco.yaml new file mode 100644 index 0000000..b9da2bf --- /dev/null +++ b/data/coco.yaml @@ -0,0 +1,35 @@ +# COCO 2017 dataset http://cocodataset.org +# Train command: python train.py --data coco.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /coco +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_coco.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../coco/train2017.txt # 118287 images +val: ../coco/val2017.txt # 5000 images +test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# number of classes +nc: 80 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] + +# Print classes +# with open('data/coco.yaml') as f: +# d = yaml.load(f, Loader=yaml.FullLoader) # dict +# for i, x in enumerate(d['names']): +# print(i, x) diff --git a/data/coco128.yaml b/data/coco128.yaml new file mode 100644 index 0000000..c41bccf --- /dev/null +++ b/data/coco128.yaml @@ -0,0 +1,28 @@ +# COCO 2017 dataset http://cocodataset.org - first 128 training images +# Train command: python train.py --data coco128.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /coco128 +# /yolov5 + + +# download command/URL (optional) +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../coco128/images/train2017/ # 128 images +val: ../coco128/images/train2017/ # 128 images + +# number of classes +nc: 80 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot 
dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] diff --git a/data/hyp.finetune.yaml b/data/hyp.finetune.yaml new file mode 100644 index 0000000..1b84cff --- /dev/null +++ b/data/hyp.finetune.yaml @@ -0,0 +1,38 @@ +# Hyperparameters for VOC finetuning +# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +# Hyperparameter Evolution Results +# Generations: 306 +# P R mAP.5 mAP.5:.95 box obj cls +# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 + +lr0: 0.0032 +lrf: 0.12 +momentum: 0.843 +weight_decay: 0.00036 +warmup_epochs: 2.0 +warmup_momentum: 0.5 +warmup_bias_lr: 0.05 +box: 0.0296 +cls: 0.243 +cls_pw: 0.631 +obj: 0.301 +obj_pw: 0.911 +iou_t: 0.2 +anchor_t: 2.91 +# anchors: 3.63 +fl_gamma: 0.0 +hsv_h: 0.0138 +hsv_s: 0.664 +hsv_v: 0.464 +degrees: 0.373 +translate: 0.245 +scale: 0.898 +shear: 0.602 +perspective: 0.0 +flipud: 0.00856 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.243 diff --git a/data/hyp.scratch.yaml b/data/hyp.scratch.yaml new file mode 100644 index 0000000..4973735 --- /dev/null +++ b/data/hyp.scratch.yaml @@ -0,0 +1,34 @@ +# Hyperparameters for COCO training from scratch +# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +landmark: 0.005 # landmark loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.5 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 0.5 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) diff --git a/data/plateAndCar.yaml b/data/plateAndCar.yaml new file mode 100644 index 0000000..ba73862 --- /dev/null +++ b/data/plateAndCar.yaml @@ -0,0 +1,20 @@ +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ +# Train command: python train.py --data voc.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /VOC +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_voc.sh + +# train and val data as 1) directory: path/images/, 2) file: 
path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/train_detect
+val: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/val_detect
+# number of classes
+nc: 3
+
+# class names
+names: [ 'single_plate','double_plate','car']
diff --git a/data/retinaface2yolo.py b/data/retinaface2yolo.py
new file mode 100644
index 0000000..b51d382
--- /dev/null
+++ b/data/retinaface2yolo.py
@@ -0,0 +1,150 @@
+import os
+import os.path
+import sys
+import torch
+import torch.utils.data as data
+import cv2
+import numpy as np
+
+class WiderFaceDetection(data.Dataset):
+    def __init__(self, txt_path, preproc=None):
+        self.preproc = preproc
+        self.imgs_path = []
+        self.words = []
+        f = open(txt_path,'r')
+        lines = f.readlines()
+        isFirst = True
+        labels = []
+        for line in lines:
+            line = line.rstrip()
+            if line.startswith('#'):
+                if isFirst is True:
+                    isFirst = False
+                else:
+                    labels_copy = labels.copy()
+                    self.words.append(labels_copy)
+                    labels.clear()
+                path = line[2:]
+                path = txt_path.replace('label.txt','images/') + path
+                self.imgs_path.append(path)
+            else:
+                line = line.split(' ')
+                label = [float(x) for x in line]
+                labels.append(label)
+
+        self.words.append(labels)
+
+    def __len__(self):
+        return len(self.imgs_path)
+
+    def __getitem__(self, index):
+        img = cv2.imread(self.imgs_path[index])
+        height, width, _ = img.shape
+
+        labels = self.words[index]
+        annotations = np.zeros((0, 15))
+        if len(labels) == 0:
+            return annotations
+        for idx, label in enumerate(labels):
+            annotation = np.zeros((1, 15))
+            # bbox
+            annotation[0, 0] = label[0]  # x1
+            annotation[0, 1] = label[1]  # y1
+            annotation[0, 2] = label[0] + label[2]  # x2
+            annotation[0, 3] = label[1] + label[3]  # y2
+
+            # landmarks
+            annotation[0, 4] = label[4]    # l0_x
+            annotation[0, 5] = label[5]    # l0_y
+            annotation[0, 6] = label[7]    # l1_x
+            annotation[0, 7] = label[8]    # l1_y
+            annotation[0, 8] = label[10]   # l2_x
+            annotation[0, 9] = label[11]   # l2_y
+            annotation[0, 10] = label[13]  # l3_x
+            annotation[0, 11] = label[14]  # l3_y
+            annotation[0, 12] = label[16]  # l4_x
+            annotation[0, 13] = label[17]  # l4_y
+            if (annotation[0, 4]<0):
+                annotation[0, 14] = -1
+            else:
+                annotation[0, 14] = 1
+
+            annotations = np.append(annotations, annotation, axis=0)
+        target = np.array(annotations)
+        if self.preproc is not None:
+            img, target = self.preproc(img, target)
+
+        return torch.from_numpy(img), target
+
+def detection_collate(batch):
+    """Custom collate fn for dealing with batches of images that have a different
+    number of associated object annotations (bounding boxes).
+ + Arguments: + batch: (tuple) A tuple of tensor images and lists of annotations + + Return: + A tuple containing: + 1) (tensor) batch of images stacked on their 0 dim + 2) (list of tensors) annotations for a given image are stacked on 0 dim + """ + targets = [] + imgs = [] + for _, sample in enumerate(batch): + for _, tup in enumerate(sample): + if torch.is_tensor(tup): + imgs.append(tup) + elif isinstance(tup, type(np.empty(0))): + annos = torch.from_numpy(tup).float() + targets.append(annos) + + return (torch.stack(imgs, 0), targets) + +save_path = '/ssd_1t/derron/yolov5-face/data/widerface/train' +aa=WiderFaceDetection("/ssd_1t/derron/yolov5-face/data/widerface/widerface/train/label.txt") +for i in range(len(aa.imgs_path)): + print(i, aa.imgs_path[i]) + img = cv2.imread(aa.imgs_path[i]) + base_img = os.path.basename(aa.imgs_path[i]) + base_txt = os.path.basename(aa.imgs_path[i])[:-4] +".txt" + save_img_path = os.path.join(save_path, base_img) + save_txt_path = os.path.join(save_path, base_txt) + with open(save_txt_path, "w") as f: + height, width, _ = img.shape + labels = aa.words[i] + annotations = np.zeros((0, 14)) + if len(labels) == 0: + continue + for idx, label in enumerate(labels): + annotation = np.zeros((1, 14)) + # bbox + label[0] = max(0, label[0]) + label[1] = max(0, label[1]) + label[2] = min(width - 1, label[2]) + label[3] = min(height - 1, label[3]) + annotation[0, 0] = (label[0] + label[2] / 2) / width # cx + annotation[0, 1] = (label[1] + label[3] / 2) / height # cy + annotation[0, 2] = label[2] / width # w + annotation[0, 3] = label[3] / height # h + #if (label[2] -label[0]) < 8 or (label[3] - label[1]) < 8: + # img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127 + # continue + # landmarks + annotation[0, 4] = label[4] / width # l0_x + annotation[0, 5] = label[5] / height # l0_y + annotation[0, 6] = label[7] / width # l1_x + annotation[0, 7] = label[8] / height # l1_y + annotation[0, 8] = label[10] / width # l2_x + annotation[0, 9] = label[11] / height # l2_y + annotation[0, 10] = label[13] / width # l3_x + annotation[0, 11] = label[14] / height # l3_y + annotation[0, 12] = label[16] / width # l4_x + annotation[0, 13] = label[17] / height # l4_y + str_label="0 " + for i in range(len(annotation[0])): + str_label =str_label+" "+str(annotation[0][i]) + str_label = str_label.replace('[', '').replace(']', '') + str_label = str_label.replace(',', '') + '\n' + f.write(str_label) + cv2.imwrite(save_img_path, img) + diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh new file mode 100644 index 0000000..caec61e --- /dev/null +++ b/data/scripts/get_argoverse_hd.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Download command: bash data/scripts/get_argoverse_hd.sh +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + +# Download/unzip images +d='../argoverse/' # unzip directory +mkdir $d +url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ +f=Argoverse-HD-Full.zip +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background +wait # finish background tasks + +cd ../argoverse/Argoverse-1.1/ +ln -s tracking images + +cd ../Argoverse-HD/annotations/ + +python3 - "$@" <train.txt +cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt + +python3 - "$@" < 3: + print('Too many arguments were 
provided.') + print('Run command: python3 train2yolo.py /path/to/original/widerface/train [/path/to/save/widerface/train]') + exit(1) + original_path = sys.argv[1] + + if len(sys.argv) == 2: + if not os.path.isdir('widerface'): + os.mkdir('widerface') + if not os.path.isdir('widerface/train'): + os.mkdir('widerface/train') + + save_path = 'widerface/train' + else: + save_path = sys.argv[2] + + if not os.path.isfile(os.path.join(original_path, 'label.txt')): + print('Missing label.txt file.') + exit(1) + + aa = WiderFaceDetection(os.path.join(original_path, 'label.txt')) + + for i in range(len(aa.imgs_path)): + print(i, aa.imgs_path[i]) + img = cv2.imread(aa.imgs_path[i]) + base_img = os.path.basename(aa.imgs_path[i]) + base_txt = os.path.basename(aa.imgs_path[i])[:-4] + ".txt" + save_img_path = os.path.join(save_path, base_img) + save_txt_path = os.path.join(save_path, base_txt) + with open(save_txt_path, "w") as f: + height, width, _ = img.shape + labels = aa.words[i] + annotations = np.zeros((0, 14)) + if len(labels) == 0: + continue + for idx, label in enumerate(labels): + annotation = np.zeros((1, 14)) + # bbox + label[0] = max(0, label[0]) + label[1] = max(0, label[1]) + label[2] = min(width - 1, label[2]) + label[3] = min(height - 1, label[3]) + annotation[0, 0] = (label[0] + label[2] / 2) / width # cx + annotation[0, 1] = (label[1] + label[3] / 2) / height # cy + annotation[0, 2] = label[2] / width # w + annotation[0, 3] = label[3] / height # h + #if (label[2] -label[0]) < 8 or (label[3] - label[1]) < 8: + # img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127 + # continue + # landmarks + annotation[0, 4] = label[4] / width # l0_x + annotation[0, 5] = label[5] / height # l0_y + annotation[0, 6] = label[7] / width # l1_x + annotation[0, 7] = label[8] / height # l1_y + annotation[0, 8] = label[10] / width # l2_x + annotation[0, 9] = label[11] / height # l2_y + annotation[0, 10] = label[13] / width # l3_x + annotation[0, 11] = label[14] / height # l3_y + annotation[0, 12] = label[16] / width # l4_x + annotation[0, 13] = label[17] / height # l4_yca + str_label = "0 " + for i in range(len(annotation[0])): + str_label = str_label + " " + str(annotation[0][i]) + str_label = str_label.replace('[', '').replace(']', '') + str_label = str_label.replace(',', '') + '\n' + f.write(str_label) + cv2.imwrite(save_img_path, img) diff --git a/data/val2yolo.py b/data/val2yolo.py new file mode 100644 index 0000000..ddd8eda --- /dev/null +++ b/data/val2yolo.py @@ -0,0 +1,88 @@ +import os +import cv2 +import numpy as np +import shutil +import sys +from tqdm import tqdm + + +def xywh2xxyy(box): + x1 = box[0] + y1 = box[1] + x2 = box[0] + box[2] + y2 = box[1] + box[3] + return x1, x2, y1, y2 + + +def convert(size, box): + dw = 1. / (size[0]) + dh = 1. 
/ (size[1]) + x = (box[0] + box[1]) / 2.0 - 1 + y = (box[2] + box[3]) / 2.0 - 1 + w = box[1] - box[0] + h = box[3] - box[2] + x = x * dw + w = w * dw + y = y * dh + h = h * dh + return x, y, w, h + + +def wider2face(root, phase='val', ignore_small=0): + data = {} + with open('{}/{}/label.txt'.format(root, phase), 'r') as f: + lines = f.readlines() + for line in tqdm(lines): + line = line.strip() + if '#' in line: + path = '{}/{}/images/{}'.format(root, phase, line.split()[-1]) + img = cv2.imread(path) + height, width, _ = img.shape + data[path] = list() + else: + box = np.array(line.split()[0:4], dtype=np.float32) # (x1,y1,w,h) + if box[2] < ignore_small or box[3] < ignore_small: + continue + box = convert((width, height), xywh2xxyy(box)) + label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4), + round(box[2], 4), round(box[3], 4)) + data[path].append(label) + return data + + +if __name__ == '__main__': + if len(sys.argv) == 1: + print('Missing path to WIDERFACE folder.') + print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]') + exit(1) + elif len(sys.argv) > 3: + print('Too many arguments were provided.') + print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]') + exit(1) + + root_path = sys.argv[1] + if not os.path.isfile(os.path.join(root_path, 'val', 'label.txt')): + print('Missing label.txt file.') + exit(1) + + if len(sys.argv) == 2: + if not os.path.isdir('widerface'): + os.mkdir('widerface') + if not os.path.isdir('widerface/val'): + os.mkdir('widerface/val') + + save_path = 'widerface/val' + else: + save_path = sys.argv[2] + + datas = wider2face(root_path, phase='val') + for idx, data in enumerate(datas.keys()): + pict_name = os.path.basename(data) + out_img = f'{save_path}/{idx}.jpg' + out_txt = f'{save_path}/{idx}.txt' + shutil.copyfile(data, out_img) + labels = datas[data] + f = open(out_txt, 'w') + for label in labels: + f.write(label + '\n') + f.close() diff --git a/data/val2yolo_for_test.py b/data/val2yolo_for_test.py new file mode 100644 index 0000000..0790c60 --- /dev/null +++ b/data/val2yolo_for_test.py @@ -0,0 +1,65 @@ +import os +import cv2 +import numpy as np +import shutil +from tqdm import tqdm + +root = '/ssd_1t/derron/WiderFace' + + +def xywh2xxyy(box): + x1 = box[0] + y1 = box[1] + x2 = box[0] + box[2] + y2 = box[1] + box[3] + return (x1, x2, y1, y2) + + +def convert(size, box): + dw = 1. / (size[0]) + dh = 1. 
/ (size[1]) + x = (box[0] + box[1]) / 2.0 - 1 + y = (box[2] + box[3]) / 2.0 - 1 + w = box[1] - box[0] + h = box[3] - box[2] + x = x * dw + w = w * dw + y = y * dh + h = h * dh + return (x, y, w, h) + + +def wider2face(phase='val', ignore_small=0): + data = {} + with open('{}/{}/label.txt'.format(root, phase), 'r') as f: + lines = f.readlines() + for line in tqdm(lines): + line = line.strip() + if '#' in line: + path = '{}/{}/images/{}'.format(root, phase, os.path.basename(line)) + img = cv2.imread(path) + height, width, _ = img.shape + data[path] = list() + else: + box = np.array(line.split()[0:4], dtype=np.float32) # (x1,y1,w,h) + if box[2] < ignore_small or box[3] < ignore_small: + continue + box = convert((width, height), xywh2xxyy(box)) + label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4), + round(box[2], 4), round(box[3], 4)) + data[path].append(label) + return data + + +if __name__ == '__main__': + datas = wider2face('val') + for idx, data in enumerate(datas.keys()): + pict_name = os.path.basename(data) + out_img = 'widerface/val/images/{}'.format(pict_name) + out_txt = 'widerface/val/labels/{}.txt'.format(os.path.splitext(pict_name)[0]) + shutil.copyfile(data, out_img) + labels = datas[data] + f = open(out_txt, 'w') + for label in labels: + f.write(label + '\n') + f.close() diff --git a/data/voc.yaml b/data/voc.yaml new file mode 100644 index 0000000..851a9e0 --- /dev/null +++ b/data/voc.yaml @@ -0,0 +1,21 @@ +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ +# Train command: python train.py --data voc.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /VOC +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_voc.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../VOC/images/train/ # 16551 images +val: ../VOC/images/val/ # 4952 images + +# number of classes +nc: 20 + +# class names +names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] diff --git a/data/widerface.yaml b/data/widerface.yaml new file mode 100644 index 0000000..0305831 --- /dev/null +++ b/data/widerface.yaml @@ -0,0 +1,19 @@ +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ +# Train command: python train.py --data voc.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /VOC +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_voc.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/train_detect +val: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/val_detect +# number of classes +nc: 2 + +# class names +names: [ 'single','double'] diff --git a/demo.sh b/demo.sh new file mode 100644 index 0000000..9384b42 --- /dev/null +++ b/demo.sh @@ -0,0 +1,3 @@ +python detect_plate_hongkang.py --image_path gangao --img_size 640 --detect_model runs/train/exp32/weights/best.pt --rec_model /mnt/Gpan/Mydata/pytorchPorject/yolov7-face/weights/plate_rec.pth +# runs/train/exp26/weights/last.ptimgs +# /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_detect/gangao \ No newline at end of file diff --git a/detect_demo.py b/detect_demo.py new file mode 100644 index 0000000..6a21676 --- /dev/null +++ 
b/detect_demo.py @@ -0,0 +1,223 @@ +# -*- coding: UTF-8 -*- +import argparse +import time +import os +import cv2 +import torch +from numpy import random +import copy +import numpy as np +from models.experimental import attempt_load +from utils.datasets import letterbox +from utils.general import check_img_size, non_max_suppression_face, scale_coords + +from utils.torch_utils import time_synchronized +from utils.cv_puttext import cv2ImgAddText +from plate_recognition.plate_rec import get_plate_result,allFilePath,cv_imread + +from plate_recognition.double_plate_split_merge import get_split_merge + +clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] + +def load_model(weights, device): + model = attempt_load(weights, map_location=device) # load FP32 model + return model + + +def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2, 4, 6]] -= pad[0] # x padding + coords[:, [1, 3, 5, 7]] -= pad[1] # y padding + coords[:, :10] /= gain + #clip_coords(coords, img0_shape) + coords[:, 0].clamp_(0, img0_shape[1]) # x1 + coords[:, 1].clamp_(0, img0_shape[0]) # y1 + coords[:, 2].clamp_(0, img0_shape[1]) # x2 + coords[:, 3].clamp_(0, img0_shape[0]) # y2 + coords[:, 4].clamp_(0, img0_shape[1]) # x3 + coords[:, 5].clamp_(0, img0_shape[0]) # y3 + coords[:, 6].clamp_(0, img0_shape[1]) # x4 + coords[:, 7].clamp_(0, img0_shape[0]) # y4 + # coords[:, 8].clamp_(0, img0_shape[1]) # x5 + # coords[:, 9].clamp_(0, img0_shape[0]) # y5 + return coords + + + + +def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num,device): + h,w,c = img.shape + result_dict={} + tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness + + x1 = int(xyxy[0]) + y1 = int(xyxy[1]) + x2 = int(xyxy[2]) + y2 = int(xyxy[3]) + landmarks_np=np.zeros((4,2)) + rect=[x1,y1,x2,y2] + for i in range(4): + point_x = int(landmarks[2 * i]) + point_y = int(landmarks[2 * i + 1]) + landmarks_np[i]=np.array([point_x,point_y]) + + class_label= int(class_num) #车牌的的类型0代表单牌,1代表双层车牌 + result_dict['rect']=rect + result_dict['landmarks']=landmarks_np.tolist() + result_dict['class']=class_label + return result_dict + + + +def detect_plate(model, orgimg, device,img_size): + # Load model + # img_size = opt_img_size + conf_thres = 0.3 + iou_thres = 0.5 + dict_list=[] + # orgimg = cv2.imread(image_path) # BGR + img0 = copy.deepcopy(orgimg) + assert orgimg is not None, 'Image Not Found ' + h0, w0 = orgimg.shape[:2] # orig hw + r = img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR + img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp) + + imgsz = check_img_size(img_size, s=model.stride.max()) # check img_size + + img = letterbox(img0, new_shape=imgsz)[0] + # img =process_data(img0) + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, to 3x416x416 + + # Run inference + t0 = time.time() + + img = torch.from_numpy(img).to(device) + img = img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + 
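+    # Note (inferred from the indexing further down): after NMS each detection row holds
+    # [x1, y1, x2, y2, conf, l0_x, l0_y, l1_x, l1_y, l2_x, l2_y, l3_x, l3_y, class] --
+    # a box, its confidence, the four plate-corner landmarks, and a class id (0 single-row, 1 double-row).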
t1 = time_synchronized()
+    pred = model(img)[0]
+    t2 = time_synchronized()
+    # print(f"infer time is {(t2-t1)*1000} ms")
+
+    # Apply NMS
+    pred = non_max_suppression_face(pred, conf_thres, iou_thres)
+
+    # print('img.shape: ', img.shape)
+    # print('orgimg.shape: ', orgimg.shape)
+
+    # Process detections
+    for i, det in enumerate(pred):  # detections per image
+        if len(det):
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()
+
+            # Print results
+            for c in det[:, -1].unique():
+                n = (det[:, -1] == c).sum()  # detections per class
+
+            det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round()
+
+            for j in range(det.size()[0]):
+                xyxy = det[j, :4].view(-1).tolist()
+                conf = det[j, 4].cpu().numpy()
+                landmarks = det[j, 5:13].view(-1).tolist()
+                class_num = det[j, 13].cpu().numpy()
+                result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num, device)
+                dict_list.append(result_dict)
+    return dict_list
+    # cv2.imwrite('result.jpg', orgimg)
+
+
+
+def draw_result(orgimg, dict_list):
+    result_str = ""
+    for result in dict_list:
+        rect_area = result['rect']
+
+        x, y, w, h = rect_area[0], rect_area[1], rect_area[2]-rect_area[0], rect_area[3]-rect_area[1]
+        padding_w = 0.05*w
+        padding_h = 0.11*h
+        rect_area[0] = max(0, int(x-padding_w))
+        rect_area[1] = max(0, int(y-padding_h))
+        rect_area[2] = min(orgimg.shape[1], int(rect_area[2]+padding_w))
+        rect_area[3] = min(orgimg.shape[0], int(rect_area[3]+padding_h))
+
+
+        landmarks = result['landmarks']
+        label = result['class']
+        # result_str += result + " "
+        for i in range(4):  # keypoints (plate corners)
+            cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1)
+        cv2.rectangle(orgimg, (rect_area[0], rect_area[1]), (rect_area[2], rect_area[3]), clors[label], 2)  # draw the box
+        cv2.putText(orgimg, str(label), (rect_area[0], rect_area[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, clors[label], 2)
+        # orgimg = cv2ImgAddText(orgimg, label, rect_area[0]-height_area, rect_area[1]-height_area-10, (0,255,0), height_area)
+    # print(result_str)
+    return orgimg
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--detect_model', nargs='+', type=str, default='weights/plate_detect.pt', help='model.pt path(s)')  # detection model
+    parser.add_argument('--image_path', type=str, default=r'D:\Project\ChePai\test\images\val', help='source')
+    parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)')
+    parser.add_argument('--output', type=str, default='result1', help='source')
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # device = torch.device("cpu")
+    opt = parser.parse_args()
+    print(opt)
+    save_path = opt.output
+    count = 0
+    if not os.path.exists(save_path):
+        os.mkdir(save_path)
+
+    detect_model = load_model(opt.detect_model, device)  # initialize the detection model
+    time_all = 0
+    time_begin = time.time()
+    if not os.path.isfile(opt.image_path):  # a directory of images
+        file_list = []
+        allFilePath(opt.image_path, file_list)
+        for img_path in file_list:
+
+            print(count, img_path)
+            time_b = time.time()
+            img = cv_imread(img_path)
+
+            if img is None:
+                continue
+            if img.shape[-1] == 4:
+                img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
+            # detect_one(model, img_path, device)
+            dict_list = detect_plate(detect_model, img, device, opt.img_size)
+            ori_img = draw_result(img, dict_list)
+            img_name = os.path.basename(img_path)
+            save_img_path = os.path.join(save_path, img_name)
+            time_e = time.time()
+            time_gap = time_e - time_b
+            if count:
+                time_all += time_gap
+            cv2.imwrite(save_img_path, ori_img)
+            count += 1
+    else:  # single image
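+        # single-image mode: read the image, drop the alpha channel if present,
+        # run detection, draw the boxes and landmarks, and save to the output folder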
+ print(count,opt.image_path,end=" ") + img =cv_imread(opt.image_path) + if img.shape[-1]==4: + img=cv2.cvtColor(img,cv2.COLOR_BGRA2BGR) + # detect_one(model,img_path,device) + dict_list=detect_plate(detect_model, img, device,opt.img_size) + ori_img=draw_result(img,dict_list) + img_name = os.path.basename(opt.image_path) + save_img_path = os.path.join(save_path,img_name) + cv2.imwrite(save_img_path,ori_img) + print(f"sumTime time is {time.time()-time_begin} s, average pic time is {time_all/(len(file_list)-1)}") \ No newline at end of file diff --git a/detect_plate.py b/detect_plate.py new file mode 100644 index 0000000..00b9b47 --- /dev/null +++ b/detect_plate.py @@ -0,0 +1,372 @@ +# -*- coding: UTF-8 -*- +import argparse +import time +from pathlib import Path +import os +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random +import copy +import numpy as np +from models.experimental import attempt_load +from utils.datasets import letterbox +from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \ + strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized +from utils.cv_puttext import cv2ImgAddText +from plate_recognition.plate_rec import get_plate_result,allFilePath,init_model,cv_imread +# from plate_recognition.plate_cls import cv_imread +from plate_recognition.double_plate_split_merge import get_split_merge + +clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] +danger=['危','险'] +def order_points(pts): #四个点按照左上 右上 右下 左下排列 + rect = np.zeros((4, 2), dtype = "float32") + s = pts.sum(axis = 1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + diff = np.diff(pts, axis = 1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + return rect + + +def four_point_transform(image, pts): #透视变换得到车牌小图 + # rect = order_points(pts) + rect = pts.astype('float32') + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype = "float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + return warped + +def load_model(weights, device): #加载检测模型 + model = attempt_load(weights, map_location=device) # load FP32 model + return model + +def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): #返回到原图坐标 + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2, 4, 6]] -= pad[0] # x padding + coords[:, [1, 3, 5, 7]] -= pad[1] # y padding + coords[:, :8] /= gain + #clip_coords(coords, img0_shape) + coords[:, 0].clamp_(0, img0_shape[1]) # x1 + coords[:, 1].clamp_(0, img0_shape[0]) # y1 + coords[:, 2].clamp_(0, img0_shape[1]) # x2 + coords[:, 3].clamp_(0, 
img0_shape[0]) # y2 + coords[:, 4].clamp_(0, img0_shape[1]) # x3 + coords[:, 5].clamp_(0, img0_shape[0]) # y3 + coords[:, 6].clamp_(0, img0_shape[1]) # x4 + coords[:, 7].clamp_(0, img0_shape[0]) # y4 + # coords[:, 8].clamp_(0, img0_shape[1]) # x5 + # coords[:, 9].clamp_(0, img0_shape[0]) # y5 + return coords + + +def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num,device,plate_rec_model,is_color=False): #获取车牌坐标以及四个角点坐标并获取车牌号 + h,w,c = img.shape + Box={} + result_dict={} + tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness + + x1 = int(xyxy[0]) + y1 = int(xyxy[1]) + x2 = int(xyxy[2]) + y2 = int(xyxy[3]) + height=y2-y1 + landmarks_np=np.zeros((4,2)) + rect=[x1,y1,x2,y2] + for i in range(4): + point_x = int(landmarks[2 * i]) + point_y = int(landmarks[2 * i + 1]) + landmarks_np[i]=np.array([point_x,point_y]) + + class_label= int(class_num) #车牌的的类型0代表单牌,1代表双层车牌 + roi_img = four_point_transform(img,landmarks_np) #透视变换得到车牌小图 + if class_label: #判断是否是双层车牌,是双牌的话进行分割后然后拼接 + roi_img=get_split_merge(roi_img) + if not is_color: + plate_number,rec_prob = get_plate_result(roi_img,device,plate_rec_model,is_color=is_color) #对车牌小图进行识别 + else: + plate_number,rec_prob,plate_color,color_conf=get_plate_result(roi_img,device,plate_rec_model,is_color=is_color) + # cv2.imwrite("roi.jpg",roi_img) + # result_dict['Score']=conf #检测区域得分 + Box['X']=landmarks_np[0][0].tolist() #车牌角点坐标 + Box['Y']=landmarks_np[0][1].tolist() + Box['Width']=rect[2]-rect[0] + Box['Height']=rect[3]-rect[1] + Box['label']=plate_number #车牌号 + Box['rect']=rect + result_dict['rect'] = rect # 车牌roi区域 + result_dict['detect_conf'] = conf # 检测区域得分 + result_dict['landmarks'] = landmarks_np.tolist() # 车牌角点坐标 + result_dict['plate_no'] = plate_number # 车牌号 + result_dict['rec_conf'] = rec_prob # 每个字符的概率 + result_dict['roi_height'] = roi_img.shape[0] # 车牌高度 + result_dict['plate_color'] = "" + if is_color: + result_dict['plate_color'] = plate_color # 车牌颜色 + result_dict['color_conf'] = color_conf # 颜色得分 + result_dict['plate_type'] = class_label # 单双层 0单层 1双层 + score = conf.tolist() + return plate_number, score, Box,result_dict + + + +def detect_Recognition_plate(model, orgimg, device,plate_rec_model,img_size,is_color=False):#获取车牌信息 + # Load model + # img_size = opt_img_size + conf_thres = 0.3 #得分阈值 + iou_thres = 0.5 #nms的iou值 + dict_list=[] + result_jpg=[] + # orgimg = cv2.imread(image_path) # BGR + img0 = copy.deepcopy(orgimg) + assert orgimg is not None, 'Image Not Found ' + h0, w0 = orgimg.shape[:2] # orig hw + r = img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR + img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp) + + imgsz = check_img_size(img_size, s=model.stride.max()) # check img_size + + img = letterbox(img0, new_shape=imgsz)[0] #检测前处理,图片长宽变为32倍数,比如变为640X640 + # img =process_data(img0) + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, to 3x416x416 图片的BGR排列转为RGB,然后将图片的H,W,C排列变为C,H,W排列 + + # Run inference + t0 = time.time() + + img = torch.from_numpy(img).to(device) + img = img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + # t1 = time_synchronized()/ + pred = model(img)[0] + # t2=time_synchronized() + # print(f"infer time is {(t2-t1)*1000} ms") + + # Apply NMS + pred = non_max_suppression_face(pred, conf_thres, iou_thres) + result_jpg.insert(0, 
pred[0].tolist()) + # print('img.shape: ', img.shape) + # print('orgimg.shape: ', orgimg.shape) + # Process detections + for i, det in enumerate(pred): # detections per image + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + + det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round() + + for j in range(det.size()[0]): + xyxy = det[j, :4].view(-1).tolist() + conf = det[j, 4].cpu().numpy() + landmarks = det[j, 5:13].view(-1).tolist() + class_num = det[j, 13].cpu().numpy() + label,score,Box,result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num,device,plate_rec_model,is_color=is_color) + dict_list.append(result_dict) + result_jpg.append(Box) + result_jpg.append(score) + result_jpg.append(label) + + return dict_list, result_jpg + # cv2.imwrite('result.jpg', orgimg) + +def draw_result(orgimg,dict_list,is_color=False): # 车牌结果画出来 + result_str ="" + for result in dict_list: + rect_area = result['rect'] + + x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] + padding_w = 0.05*w + padding_h = 0.11*h + rect_area[0]=max(0,int(x-padding_w)) + rect_area[1]=max(0,int(y-padding_h)) + rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w)) + rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) + + height_area = result['roi_height'] + landmarks=result['landmarks'] + result_p = result['plate_no'] + if result['plate_type']==0:#单层 + result_p+=" "+result['plate_color'] + else: #双层 + result_p+=" "+result['plate_color']+"双层" + result_str+=result_p+" " + for i in range(4): #关键点 + cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) + cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2) #画框 + + labelSize = cv2.getTextSize(result_p,cv2.FONT_HERSHEY_SIMPLEX,0.5,1) #获得字体的大小 + if rect_area[0]+labelSize[0][0]>orgimg.shape[1]: #防止显示的文字越界 + rect_area[0]=int(orgimg.shape[1]-labelSize[0][0]) + orgimg=cv2.rectangle(orgimg,(rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1]))),(int(rect_area[0]+round(1.2*labelSize[0][0])),rect_area[1]+labelSize[1]),(255,255,255),cv2.FILLED)#画文字框,背景白色 + + if len(result)>=1: + orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1])),(0,0,0),21) + # orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) + + print(result_str) # 打印结果 + return orgimg, result_str + + + +def get_second(capture): + if capture.isOpened(): + rate = capture.get(5) # 帧速率 + FrameNumber = capture.get(7) # 视频文件的帧数 + duration = FrameNumber/rate # 帧速率/视频总帧数 是时间,除以60之后单位是分钟 + return int(rate),int(FrameNumber),int(duration) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--detect_model', nargs='+', type=str, default='weights/plate_detect.pt', help='model.pt path(s)') #检测模型 + parser.add_argument('--rec_model', type=str, default='weights/plate_rec_color.pth', help='model.pt path(s)')#车牌识别+颜色识别模型 + parser.add_argument('--is_color',type=bool,default=True,help='plate color') #是否识别颜色 + parser.add_argument('--image_path', type=str, default=r'D:\Project\ChePai\test\images\val\20230331163841.jpg', help='source') #图片路径 + parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') #网络输入图片大小 + 
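+    # example invocation (from the README):
+    #   python detect_plate.py --detect_model weights/plate_detect.pt \
+    #       --rec_model weights/plate_rec_color.pth --image_path imgs --output result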
parser.add_argument('--output', type=str, default='result', help='source')  # where result images are saved
+    parser.add_argument('--video', type=str, default='', help='source')  # path of the input video
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # run on GPU if available, else CPU
+    # device = torch.device("cpu")
+    opt = parser.parse_args()
+    print(opt)
+    save_path = opt.output
+    count = 0
+    if not os.path.exists(save_path):
+        os.mkdir(save_path)
+
+    detect_model = load_model(opt.detect_model, device)  # initialize the detection model
+    plate_rec_model = init_model(device, opt.rec_model, is_color=opt.is_color)  # initialize the recognition (and color) model
+    # count the parameters
+    total = sum(p.numel() for p in detect_model.parameters())
+    total_1 = sum(p.numel() for p in plate_rec_model.parameters())
+    print("detect params: %.2fM,rec params: %.2fM" % (total/1e6, total_1/1e6))
+
+    # plate_color_model = init_color_model(opt.color_model, device)
+    time_all = 0
+    time_begin = time.time()
+    if not opt.video:  # image mode
+        if not os.path.isfile(opt.image_path):  # a directory
+            file_list = []
+            allFilePath(opt.image_path, file_list)  # collect every image path under the directory into file_list
+            for img_path in file_list:  # iterate over the image files
+
+                print(count, img_path, end=" ")
+                time_b = time.time()  # start time
+                img = cv_imread(img_path)  # read the image with OpenCV
+
+                if img is None:
+                    continue
+                if img.shape[-1] == 4:  # convert 4-channel images to 3 channels
+                    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
+                # detect_one(model, img_path, device)
+                dict_list, result_jpg = detect_Recognition_plate(detect_model, img, device, plate_rec_model, opt.img_size, is_color=opt.is_color)  # detect and recognize the plates
+                ori_img, str_result = draw_result(img, dict_list)  # draw the results on the image
+                print(str_result)
+                img_name = os.path.basename(img_path)
+                save_img_path = os.path.join(save_path, img_name)  # where the image is saved
+                time_e = time.time()
+                time_gap = time_e - time_b  # per-image time
+                if count:
+                    time_all += time_gap
+                cv2.imwrite(save_img_path, ori_img)  # save the annotated image
+                count += 1
+            print(f"sumTime time is {time.time()-time_begin} s, average pic time is {time_all/(len(file_list)-1)}")
+        else:  # single image
+            print(count, opt.image_path, end=" ")
+            img = cv_imread(opt.image_path)
+            if img.shape[-1] == 4:
+                img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
+            # detect_one(model, img_path, device)
+            dict_list, result_jpg = detect_Recognition_plate(detect_model, img, device, plate_rec_model, opt.img_size, is_color=opt.is_color)
+            ori_img, str_result = draw_result(img, dict_list)
+            ori_list = ori_img.tolist()
+            result_jpg.insert(0, ori_list)
+            img_name = os.path.basename(opt.image_path)
+            save_img_path = os.path.join(save_path, img_name)
+            cv2.imwrite(save_img_path, ori_img)
+
+
+    else:  # video mode
+        video_name = opt.video
+        capture = cv2.VideoCapture(video_name)
+        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
+        fps = capture.get(cv2.CAP_PROP_FPS)  # frame rate
+        width, height = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame size
+        out = cv2.VideoWriter('result.mp4', fourcc, fps, (width, height))  # output video writer
+        frame_count = 0
+        fps_all = 0
+        rate, FrameNumber, duration = get_second(capture)
+        if capture.isOpened():
+            while True:
+                t1 = cv2.getTickCount()
+                frame_count += 1
+                print(f"frame {frame_count}", end=" ")
+                ret, img = capture.read()
+                if not ret:
+                    break
+                # if frame_count % rate == 0:
+                img0 = copy.deepcopy(img)
+                dict_list, result_jpg = detect_Recognition_plate(detect_model, img, device, plate_rec_model, opt.img_size, is_color=opt.is_color)
+                ori_img, str_result = draw_result(img, dict_list)
+                t2 = cv2.getTickCount()
+                infer_time = (t2 - t1) / cv2.getTickFrequency()
+                fps = 1.0 / infer_time
+                fps_all += fps
+                str_fps = f'fps:{fps:.4f}'
+
+                cv2.putText(ori_img, str_fps, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+                # cv2.imshow("haha", ori_img)
+                # cv2.waitKey(1)
+                out.write(ori_img)
+
+                # current_time = int(frame_count/FrameNumber*duration)
+                # sec = current_time % 60
+                # minute = current_time // 60
+                # for result_ in result_list:
+                #     plate_no = result_['plate_no']
+                #     if not is_car_number(pattern_str, plate_no):
+                #         continue
+                #     print(f'plate: {plate_no}, time: {minute}m {sec}s')
+                #     time_str = f'{minute}m {sec}s'
+                #     writer.writerow({"plate": plate_no, "time": time_str})
+                # out.write(ori_img)
+
+
+        else:
+            print("Failed to open the video")
+        capture.release()
+        out.release()
+        cv2.destroyAllWindows()
+        print(f"all frame is {frame_count},average fps is {fps_all/frame_count} fps")
\ No newline at end of file
diff --git a/detect_test.py b/detect_test.py
new file mode 100644
index 0000000..9f1a90b
--- /dev/null
+++ b/detect_test.py
@@ -0,0 +1,28 @@
+import torch
+from ultralytics import YOLO
+
+# load the pretrained model
+model = YOLO('weights/plate_detect.pt')
+
+# set the image path
+image_path = r'D:\Project\ChePai\test\images\val\20230331163841.jpg'
+
+# run inference
+results = model(image_path)
+
+# parse the results
+for r in results:
+    boxes = r.boxes  # a Boxes object holding the detections
+    # bounding-box coordinates
+    box_coordinates = boxes.xyxy.cpu().numpy()
+    # confidence scores
+    confidences = boxes.conf.cpu().numpy()
+    # class labels
+    labels = boxes.cls.cpu().numpy().astype(int)
+
+    # print the detections
+    for i in range(len(box_coordinates)):
+        x1, y1, x2, y2 = box_coordinates[i]
+        confidence = confidences[i]
+        label = model.names[labels[i]]
+        print(f'Object: {label}, Confidence: {confidence:.2f}, Bounding Box: ({x1:.2f}, {y1:.2f}, {x2:.2f}, {y2:.2f})')
\ No newline at end of file
diff --git a/export.py b/export.py
new file mode 100644
index 0000000..47854c0
--- /dev/null
+++ b/export.py
@@ -0,0 +1,161 @@
+"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
+
+Usage:
+    $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+"""
+
+import argparse
+import sys
+import time
+
+sys.path.append('./')  # to run '$ python *.py' files in subdirectories
+
+import torch
+import torch.nn as nn
+
+import models
+from models.experimental import attempt_load
+from utils.activations import Hardswish, SiLU
+from utils.general import set_logging, check_img_size
+import onnx
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
+    parser.add_argument('--img_size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
+    parser.add_argument('--batch_size', type=int, default=1, help='batch size')
+    parser.add_argument('--dynamic', action='store_true', default=False, help='enable dynamic axis in onnx model')
+    parser.add_argument('--onnx2pb', action='store_true', default=False, help='export onnx to pb')
+    parser.add_argument('--onnx_infer', action='store_true', default=True, help='onnx infer test')
+    #=======================TensorRT=================================
+    parser.add_argument('--onnx2trt', action='store_true', default=False, help='export onnx to tensorrt')
+    parser.add_argument('--fp16_trt', action='store_true', default=False, help='fp16 infer')
+    #================================================================
+    opt = parser.parse_args()
+    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
+    print(opt)
+    set_logging()
+    t = time.time()
+
+    # Load PyTorch model
+    model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model
+    delattr(model.model[-1], 'anchor_grid')
+    model.model[-1].anchor_grid = [torch.zeros(1)] * 3  # nl=3 number of detection layers
+    model.model[-1].export_cat = True
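+    # Note: anchor_grid is deleted and replaced with zero placeholders so tracing does not
+    # bake the anchors into the graph (they are re-created at inference time); export_cat
+    # appears to be a yolov5-face-style flag that makes the Detect head return a single
+    # concatenated output tensor, keeping the exported ONNX graph to one output.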
model.eval() + labels = model.names + + # Checks + gs = int(max(model.stride)) # grid size (max stride) + opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + + # Input + img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection + + # Update model + for k, m in model.named_modules(): + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m.act, nn.Hardswish): + m.act = Hardswish() + elif isinstance(m.act, nn.SiLU): + m.act = SiLU() + # elif isinstance(m, models.yolo.Detect): + # m.forward = m.forward_export # assign forward (optional) + if isinstance(m, models.common.ShuffleV2Block):#shufflenet block nn.SiLU + for i in range(len(m.branch1)): + if isinstance(m.branch1[i], nn.SiLU): + m.branch1[i] = SiLU() + for i in range(len(m.branch2)): + if isinstance(m.branch2[i], nn.SiLU): + m.branch2[i] = SiLU() + if isinstance(m, models.common.BlazeBlock):#shufflenet block nn.SiLU + if isinstance(m.relu, nn.SiLU): + m.relu = SiLU() + if isinstance(m, models.common.DoubleBlazeBlock):#shufflenet block nn.SiLU + if isinstance(m.relu, nn.SiLU): + m.relu = SiLU() + for i in range(len(m.branch1)): + if isinstance(m.branch1[i], nn.SiLU): + m.branch1[i] = SiLU() + # for i in range(len(m.branch2)): + # if isinstance(m.branch2[i], nn.SiLU): + # m.branch2[i] = SiLU() + y = model(img) # dry run + + # ONNX export + print('\nStarting ONNX export with onnx %s...' % onnx.__version__) + f = opt.weights.replace('.pt', '.onnx') # filename + model.fuse() # only for ONNX + input_names=['input'] + output_names=['output'] + #tensorrt 7 + # grid = model.model[-1].anchor_grid + # model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + #tensorrt 7 + + torch.onnx.export(model, img, f, verbose=False, opset_version=12, + input_names=input_names, + output_names=output_names, + dynamic_axes = {'input': {0: 'batch'}, + 'output': {0: 'batch'} + } if opt.dynamic else None) + + # model.model[-1].anchor_grid = grid + + # Checks + onnx_model = onnx.load(f) # load onnx model + onnx.checker.check_model(onnx_model) # check onnx model + print('ONNX export success, saved as %s' % f) + # Finish + print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' 
% (time.time() - t)) + + + # onnx infer + if opt.onnx_infer: + import onnxruntime + import numpy as np + providers = ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(f, providers=providers) + im = img.cpu().numpy().astype(np.float32) # torch to numpy + y_onnx = session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: im})[0] + print("pred's shape is ",y_onnx.shape) + print("max(|torch_pred - onnx_pred|) =",abs(y.cpu().numpy()-y_onnx).max()) + + + # TensorRT export + if opt.onnx2trt: + from torch2trt.trt_model import ONNX_to_TRT + print('\nStarting TensorRT...') + ONNX_to_TRT(onnx_model_path=f,trt_engine_path=f.replace('.onnx', '.trt'),fp16_mode=opt.fp16_trt) + + # PB export + if opt.onnx2pb: + print('download the newest onnx_tf by https://github.com/onnx/onnx-tensorflow/tree/master/onnx_tf') + from onnx_tf.backend import prepare + import tensorflow as tf + + outpb = f.replace('.onnx', '.pb') # filename + # strict=True maybe leads to KeyError: 'pyfunc_0', check: https://github.com/onnx/onnx-tensorflow/issues/167 + tf_rep = prepare(onnx_model, strict=False) # prepare tf representation + tf_rep.export_graph(outpb) # export the model + + out_onnx = tf_rep.run(img) # onnx output + + # check pb + with tf.Graph().as_default(): + graph_def = tf.GraphDef() + with open(outpb, "rb") as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name="") + with tf.Session() as sess: + init = tf.global_variables_initializer() + input_x = sess.graph.get_tensor_by_name(input_names[0]+':0') # input + outputs = [] + for i in output_names: + outputs.append(sess.graph.get_tensor_by_name(i+':0')) + out_pb = sess.run(outputs, feed_dict={input_x: img}) + + print(f'out_pytorch {y}') + print(f'out_onnx {out_onnx}') + print(f'out_pb {out_pb}') diff --git a/hubconf.py b/hubconf.py new file mode 100644 index 0000000..c4485a4 --- /dev/null +++ b/hubconf.py @@ -0,0 +1,141 @@ +"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/ + +Usage: + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80) +""" + +from pathlib import Path + +import torch + +from models.yolo import Model +from utils.general import set_logging +from utils.google_utils import attempt_download + +dependencies = ['torch', 'yaml'] +set_logging() + + +def create(name, pretrained, channels, classes, autoshape): + """Creates a specified YOLOv5 model + + Arguments: + name (str): name of model, i.e. 
'yolov5s' + pretrained (bool): load pretrained weights into the model + channels (int): number of input channels + classes (int): number of model classes + + Returns: + pytorch model + """ + config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path + try: + model = Model(config, channels, classes) + if pretrained: + fname = f'{name}.pt' # checkpoint filename + attempt_download(fname) # download if not found locally + ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter + model.load_state_dict(state_dict, strict=False) # load + if len(ckpt['model'].names) == classes: + model.names = ckpt['model'].names # set class names attribute + if autoshape: + model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + return model + + except Exception as e: + help_url = 'https://github.com/ultralytics/yolov5/issues/36' + s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url + raise Exception(s) from e + + +def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-small model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5s', pretrained, channels, classes, autoshape) + + +def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-medium model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5m', pretrained, channels, classes, autoshape) + + +def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-large model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5l', pretrained, channels, classes, autoshape) + + +def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5x', pretrained, channels, classes, autoshape) + + +def custom(path_or_model='path/to/model.pt', autoshape=True): + """YOLOv5-custom model from https://github.com/ultralytics/yolov5 + + Arguments (3 options): + path_or_model (str): 'path/to/model.pt' + path_or_model (dict): torch.load('path/to/model.pt') + path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] + + Returns: + pytorch model + """ + model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint + if isinstance(model, dict): + model = model['model'] # load model + + hub_model = Model(model.yaml).to(next(model.parameters()).device) # create + 
hub_model.load_state_dict(model.float().state_dict()) # load state_dict + hub_model.names = model.names # class names + return hub_model.autoshape() if autoshape else hub_model + + +if __name__ == '__main__': + model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example + # model = custom(path_or_model='path/to/model.pt') # custom example + + # Verify inference + from PIL import Image + + imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] + results = model(imgs) + results.show() + results.print() diff --git a/image/README/1.png b/image/README/1.png new file mode 100644 index 0000000..54ec068 Binary files /dev/null and b/image/README/1.png differ diff --git a/image/README/105384078.png b/image/README/105384078.png new file mode 100644 index 0000000..681d773 Binary files /dev/null and b/image/README/105384078.png differ diff --git a/image/README/test_1.jpg b/image/README/test_1.jpg new file mode 100644 index 0000000..4f63fee Binary files /dev/null and b/image/README/test_1.jpg differ diff --git a/image/README/weixian.png b/image/README/weixian.png new file mode 100644 index 0000000..e18c595 Binary files /dev/null and b/image/README/weixian.png differ diff --git a/imgs/Quicker_20220930_180856.png b/imgs/Quicker_20220930_180856.png new file mode 100644 index 0000000..eed5017 Binary files /dev/null and b/imgs/Quicker_20220930_180856.png differ diff --git a/imgs/Quicker_20220930_180919.png b/imgs/Quicker_20220930_180919.png new file mode 100644 index 0000000..42abc0f Binary files /dev/null and b/imgs/Quicker_20220930_180919.png differ diff --git a/imgs/Quicker_20220930_180938.png b/imgs/Quicker_20220930_180938.png new file mode 100644 index 0000000..1e34d8c Binary files /dev/null and b/imgs/Quicker_20220930_180938.png differ diff --git a/imgs/Quicker_20220930_181044.png b/imgs/Quicker_20220930_181044.png new file mode 100644 index 0000000..cde51c1 Binary files /dev/null and b/imgs/Quicker_20220930_181044.png differ diff --git a/imgs/double_yellow.jpg b/imgs/double_yellow.jpg new file mode 100644 index 0000000..b42b61c Binary files /dev/null and b/imgs/double_yellow.jpg differ diff --git a/imgs/hongkang1.jpg b/imgs/hongkang1.jpg new file mode 100644 index 0000000..23099b1 Binary files /dev/null and b/imgs/hongkang1.jpg differ diff --git a/imgs/moto.png b/imgs/moto.png new file mode 100644 index 0000000..c0d49db Binary files /dev/null and b/imgs/moto.png differ diff --git a/imgs/police.jpg b/imgs/police.jpg new file mode 100644 index 0000000..9ac0bb1 Binary files /dev/null and b/imgs/police.jpg differ diff --git a/imgs/shi_lin_guan.jpg b/imgs/shi_lin_guan.jpg new file mode 100644 index 0000000..f7c216d Binary files /dev/null and b/imgs/shi_lin_guan.jpg differ diff --git a/imgs/single_blue.jpg b/imgs/single_blue.jpg new file mode 100644 index 0000000..28abf64 Binary files /dev/null and b/imgs/single_blue.jpg differ diff --git a/imgs/single_green.jpg b/imgs/single_green.jpg new file mode 100644 index 0000000..0a0e0a9 Binary files /dev/null and b/imgs/single_green.jpg differ diff --git a/imgs/single_yellow.jpg b/imgs/single_yellow.jpg new file mode 100644 index 0000000..ebe3a26 Binary files /dev/null and b/imgs/single_yellow.jpg differ diff --git a/imgs/tmp8F1F.png b/imgs/tmp8F1F.png new file mode 100644 index 0000000..aeaa7fa Binary files /dev/null and b/imgs/tmp8F1F.png differ diff --git a/imgs/tmpA5E3.png b/imgs/tmpA5E3.png new file mode 100644 index 0000000..b75872b Binary files /dev/null and b/imgs/tmpA5E3.png differ diff --git 
a/imgs/xue.jpg b/imgs/xue.jpg new file mode 100644 index 0000000..d583ac7 Binary files /dev/null and b/imgs/xue.jpg differ diff --git a/json2yolo.py b/json2yolo.py new file mode 100644 index 0000000..9bfc069 --- /dev/null +++ b/json2yolo.py @@ -0,0 +1,121 @@ +import json +import os +import numpy as np +from copy import deepcopy +import cv2 + +def allFilePath(rootPath,allFIleList): + fileList = os.listdir(rootPath) + for temp in fileList: + if os.path.isfile(os.path.join(rootPath,temp)): + allFIleList.append(os.path.join(rootPath,temp)) + else: + allFilePath(os.path.join(rootPath,temp),allFIleList) + +def xywh2yolo(rect,landmarks_sort,img): + h,w,c =img.shape + rect[0] = max(0, rect[0]) + rect[1] = max(0, rect[1]) + rect[2] = min(w - 1, rect[2]-rect[0]) + rect[3] = min(h - 1, rect[3]-rect[1]) + annotation = np.zeros((1, 12)) + annotation[0, 0] = (rect[0] + rect[2] / 2) / w # cx + annotation[0, 1] = (rect[1] + rect[3] / 2) / h # cy + annotation[0, 2] = rect[2] / w # w + annotation[0, 3] = rect[3] / h # h + + annotation[0, 4] = landmarks_sort[0][0] / w # l0_x + annotation[0, 5] = landmarks_sort[0][1] / h # l0_y + annotation[0, 6] = landmarks_sort[1][0] / w # l1_x + annotation[0, 7] = landmarks_sort[1][1] / h # l1_y + annotation[0, 8] = landmarks_sort[2][0] / w # l2_x + annotation[0, 9] = landmarks_sort[2][1] / h # l2_y + annotation[0, 10] = landmarks_sort[3][0] / w # l3_x + annotation[0, 11] = landmarks_sort[3][1] / h # l3_y + # annotation[0, 12] = (landmarks_sort[0][0]+landmarks_sort[1][0])/2 / w # l4_x + # annotation[0, 13] = (landmarks_sort[0][1]+landmarks_sort[1][1])/2 / h # l4_y + return annotation + +def order_points(pts): + rect = np.zeros((4, 2), dtype = "float32") + s = pts.sum(axis = 1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + diff = np.diff(pts, axis = 1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + + # return the ordered coordinates + return rect + +def four_point_transform(image, pts): + rect = order_points(pts) + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype = "float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + + # return the warped image + return warped + +if __name__ == "__main__": + pic_file_list = [] + pic_file = r"/mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_bisai/train_bisai" + save_small_path = "small" + label_file = ['0','1'] + allFilePath(pic_file,pic_file_list) + count=0 + index = 0 + for pic_ in pic_file_list: + if not pic_.endswith(".jpg"): + continue + count+=1 + img = cv2.imread(pic_) + img_name = os.path.basename(pic_) + txt_name = img_name.replace(".jpg",".txt") + txt_path = os.path.join(pic_file,txt_name) + json_file_ = pic_.replace(".jpg",".json") + if not os.path.exists(json_file_): + continue + with open(json_file_, 'r',encoding='utf-8') as a: + data_dict = json.load(a) + # print(data_dict['shapes']) + with open(txt_path,"w") as f: + for data_message in data_dict['shapes']: + index+=1 + label=data_message['label'] + points = data_message['points'] + pts = np.array(points) + # 
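The loop that follows converts each labelme polygon into one YOLO keypoint label line: a class id, a normalized box (cx, cy, w, h), then four normalized corner points. An illustrative line with made-up placeholder values:

```python
# One label line as written by json2yolo.py; all values are placeholders.
row = [0,
       0.512, 0.430, 0.210, 0.085,                      # cx cy w h
       0.41, 0.39, 0.62, 0.39, 0.62, 0.47, 0.41, 0.47]  # x1 y1 ... x4 y4
print(" ".join(str(v) for v in row))
```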
pts=order_points(pts) + # new_img = four_point_transform(img,pts) + roi_img_name = label+"_"+str(index)+".jpg" + save_path=os.path.join(save_small_path,roi_img_name) + # cv2.imwrite(save_path,new_img) + x_max,y_max = np.max(pts,axis=0) + x_min,y_min = np.min(pts,axis=0) + rect = [x_min,y_min,x_max,y_max] + rect1=deepcopy(rect) + annotation=xywh2yolo(rect1,pts,img) + print(data_message) + label = data_message['label'] + str_label = label_file.index(label) + # str_label = "0 " + str_label = str(str_label)+" " + for i in range(len(annotation[0])): + str_label = str_label + " " + str(annotation[0][i]) + str_label = str_label.replace('[', '').replace(']', '') + str_label = str_label.replace(',', '') + '\n' + + f.write(str_label) + print(count,img_name) + # point=data_message[points] diff --git a/main.py b/main.py new file mode 100644 index 0000000..d107e6a --- /dev/null +++ b/main.py @@ -0,0 +1,349 @@ +# -*- coding: UTF-8 -*- +from flask import Flask, request, jsonify +from PIL import Image +import io +import base64 +import time +from pathlib import Path +import os +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random +import copy +import numpy as np +from models.experimental import attempt_load +from utils.datasets import letterbox +from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \ + strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized +from utils.cv_puttext import cv2ImgAddText +from plate_recognition.plate_rec import get_plate_result, allFilePath, init_model, cv_imread +# from plate_recognition.plate_cls import cv_imread +from plate_recognition.double_plate_split_merge import get_split_merge + + +clors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)] +danger = ['危', '险'] + + +def order_points(pts): # 四个点按照左上 右上 右下 左下排列 + rect = np.zeros((4, 2), dtype="float32") + s = pts.sum(axis=1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + diff = np.diff(pts, axis=1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + return rect + + +def four_point_transform(image, pts): # 透视变换得到车牌小图 + # rect = order_points(pts) + rect = pts.astype('float32') + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype="float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + return warped + + +def load_model(weights, device): # 加载检测模型 + model = attempt_load(weights, map_location=device) # load FP32 model + return model + + +def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): # 返回到原图坐标 + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = 
ratio_pad[1] + + coords[:, [0, 2, 4, 6]] -= pad[0] # x padding + coords[:, [1, 3, 5, 7]] -= pad[1] # y padding + coords[:, :8] /= gain + # clip_coords(coords, img0_shape) + coords[:, 0].clamp_(0, img0_shape[1]) # x1 + coords[:, 1].clamp_(0, img0_shape[0]) # y1 + coords[:, 2].clamp_(0, img0_shape[1]) # x2 + coords[:, 3].clamp_(0, img0_shape[0]) # y2 + coords[:, 4].clamp_(0, img0_shape[1]) # x3 + coords[:, 5].clamp_(0, img0_shape[0]) # y3 + coords[:, 6].clamp_(0, img0_shape[1]) # x4 + coords[:, 7].clamp_(0, img0_shape[0]) # y4 + # coords[:, 8].clamp_(0, img0_shape[1]) # x5 + # coords[:, 9].clamp_(0, img0_shape[0]) # y5 + return coords + + +def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num, device, plate_rec_model, + is_color=False): # 获取车牌坐标以及四个角点坐标并获取车牌号 + h, w, c = img.shape + Box = {} + result_dict = {} + tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness + + x1 = int(xyxy[0]) + y1 = int(xyxy[1]) + x2 = int(xyxy[2]) + y2 = int(xyxy[3]) + height = y2 - y1 + landmarks_np = np.zeros((4, 2)) + rect = [x1, y1, x2, y2] + for i in range(4): + point_x = int(landmarks[2 * i]) + point_y = int(landmarks[2 * i + 1]) + landmarks_np[i] = np.array([point_x, point_y]) + + class_label = int(class_num) # 车牌的的类型0代表单牌,1代表双层车牌 + roi_img = four_point_transform(img, landmarks_np) # 透视变换得到车牌小图 + if class_label: # 判断是否是双层车牌,是双牌的话进行分割后然后拼接 + roi_img = get_split_merge(roi_img) + if not is_color: + plate_number, rec_prob = get_plate_result(roi_img, device, plate_rec_model, is_color=is_color) # 对车牌小图进行识别 + else: + plate_number, rec_prob, plate_color, color_conf = get_plate_result(roi_img, device, plate_rec_model, + is_color=is_color) + Box['X'] = landmarks_np[0][0].tolist() # 车牌角点坐标 + Box['Y'] = landmarks_np[0][1].tolist() + Box['Width'] = rect[2] - rect[0] + Box['Height'] = rect[3] - rect[1] + # Box['label'] = plate_number # 车牌号 + # Box['rect'] = rect + result_dict['rect'] = rect # 车牌roi区域 + result_dict['detect_conf'] = conf # 检测区域得分 + result_dict['landmarks'] = landmarks_np.tolist() # 车牌角点坐标 + result_dict['plate_no'] = plate_number # 车牌号 + result_dict['rec_conf'] = rec_prob # 每个字符的概率 + result_dict['roi_height'] = roi_img.shape[0] # 车牌高度 + result_dict['plate_color'] = "" + if is_color: + result_dict['plate_color'] = plate_color # 车牌颜色 + result_dict['color_conf'] = color_conf # 颜色得分 + result_dict['plate_type'] = class_label # 单双层 0单层 1双层 + score = conf.tolist() + return plate_number, score, Box, result_dict + + +def detect_Recognition_plate(model, orgimg, device, plate_rec_model, img_size, is_color=False): # 获取车牌信息 + # Load model + # img_size = opt_img_size + conf_thres = 0.3 # 得分阈值 + iou_thres = 0.5 # nms的iou值 + dict_list = [] + result_jpg = [] + # orgimg = cv2.imread(image_path) # BGR + img0 = copy.deepcopy(orgimg) + assert orgimg is not None, 'Image Not Found ' + h0, w0 = orgimg.shape[:2] # orig hw + r = img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR + img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp) + + imgsz = check_img_size(img_size, s=model.stride.max()) # check img_size + + img = letterbox(img0, new_shape=imgsz)[0] # 检测前处理,图片长宽变为32倍数,比如变为640X640 + # img =process_data(img0) + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1).copy() # BGR to RGB, to 3x416x416 图片的BGR排列转为RGB,然后将图片的H,W,C排列变为C,H,W排列 + + # Run inference + t0 = time.time() + + img = torch.from_numpy(img).to(device) + img = img.float() # uint8 to fp16/32 + img 
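The mapping in `scale_coords_landmarks` above is the inverse of letterbox preprocessing: subtract the padding, then divide by the resize gain. A small numeric check with example shapes:

```python
# Inverse-letterbox mapping: network-space coords minus padding, over gain.
img1_shape, img0_shape = (640, 640), (720, 1280)   # (h, w): network input vs original
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # 0.5
pad_w = (img1_shape[1] - img0_shape[1] * gain) / 2  # 0.0
pad_h = (img1_shape[0] - img0_shape[0] * gain) / 2  # 140.0
x_net, y_net = 400.0, 350.0                         # a landmark in 640x640 space
print((x_net - pad_w) / gain, (y_net - pad_h) / gain)  # 800.0 420.0
```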
/= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + pred = model(img)[0] + # Apply NMS + pred = non_max_suppression_face(pred, conf_thres, iou_thres) + # result_jpg.insert(0, pred) + # Process detections + for i, det in enumerate(pred): # detections per image + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + + det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round() + + for j in range(det.size()[0]): + xyxy = det[j, :4].view(-1).tolist() + conf = det[j, 4].cpu().numpy() + landmarks = det[j, 5:13].view(-1).tolist() + class_num = det[j, 13].cpu().numpy() + label, score, Box, result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num, + device, plate_rec_model, is_color=is_color) + dict_list.append(result_dict) + result_jpg.append(Box) + result_jpg.append(score) + result_jpg.append(label) + return dict_list, result_jpg + # cv2.imwrite('result.jpg', orgimg) + + +def draw_result(orgimg, dict_list, is_color=False): # 车牌结果画出来 + result_str = "" + for result in dict_list: + rect_area = result['rect'] + + x, y, w, h = rect_area[0], rect_area[1], rect_area[2] - rect_area[0], rect_area[3] - rect_area[1] + padding_w = 0.05 * w + padding_h = 0.11 * h + rect_area[0] = max(0, int(x - padding_w)) + rect_area[1] = max(0, int(y - padding_h)) + rect_area[2] = min(orgimg.shape[1], int(rect_area[2] + padding_w)) + rect_area[3] = min(orgimg.shape[0], int(rect_area[3] + padding_h)) + + height_area = result['roi_height'] + landmarks = result['landmarks'] + result_p = result['plate_no'] + if result['plate_type'] == 0: # 单层 + result_p += " " + result['plate_color'] + else: # 双层 + result_p += " " + result['plate_color'] + "双层" + result_str += result_p + " " + for i in range(4): # 关键点 + cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) + cv2.rectangle(orgimg, (rect_area[0], rect_area[1]), (rect_area[2], rect_area[3]), (0, 0, 255), 2) # 画框 + + labelSize = cv2.getTextSize(result_p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1) # 获得字体的大小 + if rect_area[0] + labelSize[0][0] > orgimg.shape[1]: # 防止显示的文字越界 + rect_area[0] = int(orgimg.shape[1] - labelSize[0][0]) + orgimg = cv2.rectangle(orgimg, (rect_area[0], int(rect_area[1] - round(1.6 * labelSize[0][1]))), + (int(rect_area[0] + round(1.2 * labelSize[0][0])), rect_area[1] + labelSize[1]), + (255, 255, 255), cv2.FILLED) # 画文字框,背景白色 + + if len(result) >= 1: + orgimg = cv2ImgAddText(orgimg, result_p, rect_area[0], int(rect_area[1] - round(1.6 * labelSize[0][1])), + (0, 0, 0), 21) + # orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) + + print(result_str) # 打印结果 + return orgimg, result_str + + +def get_second(capture): + if capture.isOpened(): + rate = capture.get(5) # 帧速率 + FrameNumber = capture.get(7) # 视频文件的帧数 + duration = FrameNumber / rate # 帧速率/视频总帧数 是时间,除以60之后单位是分钟 + return int(rate), int(FrameNumber), int(duration) + +def process_images(detect_model_path, rec_model_path, is_color, img, img_size, output, video_path): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # 创建保存结果的文件夹 + save_path = output + if not os.path.exists(save_path): + os.mkdir(save_path) + + # 加载模型 + detect_model = load_model(detect_model_path, device) + plate_rec_model = init_model(device, 
rec_model_path, is_color=is_color) + + # img = cv_imread(image_path) + if img.shape[-1] == 4: + img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) + dict_list, result_jpg = detect_Recognition_plate(detect_model, img, device, plate_rec_model, img_size, + is_color=is_color) + # ori_img = draw_result(img, dict_list) + # ori_list=ori_img[0].tolist() + # result_jpg.insert(0,ori_list) + result_jpg.insert(0, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + return result_jpg + + +app = Flask(__name__) +def base64_to_image(base64_str): + # 去掉base64编码中的头部信息 + base64_str = base64_str.split(",")[-1] + # 解码base64字符串 + image_data = base64.b64decode(base64_str) + # 转换为numpy数组 + nparr = np.frombuffer(image_data, np.uint8) + # 解码为OpenCV格式的图片对象 + image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) + return image + +@app.route('/upload', methods=['POST']) +def upload_image(): + try: + # 从请求中获取base64编码的图片数据 + data = request.json + # print(data) + base64_str = data.get('image') + # print(base64_str) + if not base64_str: + return jsonify({'error': 'No image data provided'}), 400 + + # 将base64编码转换为图片 + image = base64_to_image(base64_str) + + result_jpg = process_images( + detect_model_path='weights/plate_detect.pt', + rec_model_path='weights/plate_rec_color.pth', + is_color=True, + img=image, + img_size=640, + output='result', + video_path='' # 如果处理图片,视频路径留空 + ) + + # 构建一个字典来存储结果 + results = [] + + # 添加注册矩阵 + register_matrix = [ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1] + ] + results.append({"RegisterMatrix": register_matrix}) + + # 添加检测结果 + for i in range(1, len(result_jpg), 3): + box, score, label = result_jpg[i:i + 3] + box_data = box + + detection_result = { + "Box": box_data, + "Score": score, + "label": label + } + results.append(detection_result) + # print(detection_result) + # 返回处理结果 + return jsonify({"result.jpg": results}) + except Exception as e: + # 打印异常信息以帮助诊断 + print(f"Caught an exception: {type(e).__name__}: {str(e)}") + return jsonify({"error_msg": "Content processing is incorrect", + "error_code": "AIS.0404"}) + + + + +if __name__ == '__main__': + app.run(debug=True) diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/blazeface.yaml b/models/blazeface.yaml new file mode 100644 index 0000000..830278d --- /dev/null +++ b/models/blazeface.yaml @@ -0,0 +1,33 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [5,6, 10,13, 21,26] # P3/8 + - [55,72, 225,304, 438,553] # P4/16 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [24, 3, 2]], # 0-P1/2 + [-1, 2, BlazeBlock, [24]], # 1 + [-1, 1, BlazeBlock, [48, None, 2]], # 2-P2/4 + [-1, 2, BlazeBlock, [48]], # 3 + [-1, 1, DoubleBlazeBlock, [96, 24, 2]], # 4-P3/8 + [-1, 2, DoubleBlazeBlock, [96, 24]], # 5 + [-1, 1, DoubleBlazeBlock, [96, 24, 2]], # 6-P4/16 + [-1, 2, DoubleBlazeBlock, [96, 24]], # 7 + ] + + +# YOLOv5 head +head: + [[-1, 1, Conv, [64, 1, 1]], # 8 (P4/32-large) + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 5], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [64, 1, 1]], # 11 (P3/8-medium) + + [[11, 8], 1, Detect, [nc, anchors]], # Detect(P3, P4) + ] diff --git a/models/blazeface_fpn.yaml b/models/blazeface_fpn.yaml new file mode 100644 index 0000000..2ed2eaf --- /dev/null +++ b/models/blazeface_fpn.yaml @@ -0,0 +1,38 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# 
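For completeness, a hypothetical client for the `/upload` route defined above; host and port are Flask's development defaults, since `app.run(debug=True)` serves on 127.0.0.1:5000:

```python
import base64
import requests  # any HTTP client works; requests is assumed here

with open("imgs/single_blue.jpg", "rb") as f:
    payload = {"image": base64.b64encode(f.read()).decode("utf-8")}

resp = requests.post("http://127.0.0.1:5000/upload", json=payload)
print(resp.json())  # {"result.jpg": [{"RegisterMatrix": ...}, {"Box": ..., "Score": ..., "label": ...}, ...]}
```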
anchors +anchors: + - [5,6, 10,13, 21,26] # P3/8 + - [55,72, 225,304, 438,553] # P4/16 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [24, 3, 2]], # 0-P1/2 + [-1, 2, BlazeBlock, [24]], # 1 + [-1, 1, BlazeBlock, [48, None, 2]], # 2-P2/4 + [-1, 2, BlazeBlock, [48]], # 3 + [-1, 1, DoubleBlazeBlock, [96, 24, 2]], # 4-P3/8 + [-1, 2, DoubleBlazeBlock, [96, 24]], # 5 + [-1, 1, DoubleBlazeBlock, [96, 24, 2]], # 6-P4/16 + [-1, 2, DoubleBlazeBlock, [96, 24]], # 7 + ] + + +# YOLOv5 head +head: + [[-1, 1, Conv, [48, 1, 1]], # 8 + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 5], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [48, 1, 1]], # 11 (P3/8-medium) + + [-1, 1, nn.MaxPool2d, [3, 2, 1]], # 12 + [[-1, 7], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [48, 1, 1]], # 14 (P4/16-large) + + [[11, 14], 1, Detect, [nc, anchors]], # Detect(P3, P4) + ] + diff --git a/models/common.py b/models/common.py new file mode 100644 index 0000000..e8d2efd --- /dev/null +++ b/models/common.py @@ -0,0 +1,456 @@ +# This file contains modules common to various models + +import math + +import numpy as np +import requests +import torch +import torch.nn as nn +from PIL import Image, ImageDraw + +from utils.datasets import letterbox +from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh +from utils.plots import color_list + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + +def channel_shuffle(x, groups): + batchsize, num_channels, height, width = x.data.size() + channels_per_group = num_channels // groups + + # reshape + x = x.view(batchsize, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + + # flatten + x = x.view(batchsize, -1, height, width) + return x + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + #self.act = self.act = nn.LeakyReLU(0.1, inplace=True) if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + +class StemBlock(nn.Module): + def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True): + super(StemBlock, self).__init__() + self.stem_1 = Conv(c1, c2, k, s, p, g, act) + self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0) + self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1) + self.stem_2p = nn.MaxPool2d(kernel_size=2,stride=2,ceil_mode=True) + self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0) + + def forward(self, x): + stem_1_out = self.stem_1(x) + stem_2a_out = self.stem_2a(stem_1_out) + stem_2b_out = self.stem_2b(stem_2a_out) + stem_2p_out = self.stem_2p(stem_1_out) + out = self.stem_3(torch.cat((stem_2b_out,stem_2p_out),1)) + return out + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, 
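A quick numeric check of `channel_shuffle` above: with 4 channels and 2 groups, channel order [0, 1, 2, 3] becomes [0, 2, 1, 3], interleaving the two groups so information mixes across branches:

```python
import torch

x = torch.arange(4.).view(1, 4, 1, 1)  # channels tagged 0..3
b, c, h, w = x.shape
g = 2
y = x.view(b, g, c // g, h, w).transpose(1, 2).contiguous().view(b, -1, h, w)
print(y.flatten().tolist())  # [0.0, 2.0, 1.0, 3.0]
```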
c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.LeakyReLU(0.1, inplace=True) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + +class ShuffleV2Block(nn.Module): + def __init__(self, inp, oup, stride): + super(ShuffleV2Block, self).__init__() + + if not (1 <= stride <= 3): + raise ValueError('illegal stride value') + self.stride = stride + + branch_features = oup // 2 + assert (self.stride != 1) or (inp == branch_features << 1) + + if self.stride > 1: + self.branch1 = nn.Sequential( + self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1), + nn.BatchNorm2d(inp), + nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(), + ) + else: + self.branch1 = nn.Sequential() + + self.branch2 = nn.Sequential( + nn.Conv2d(inp if (self.stride > 1) else branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(), + self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1), + nn.BatchNorm2d(branch_features), + nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(), + ) + + @staticmethod + def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): + return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) + + def forward(self, x): + if self.stride == 1: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + else: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + out = channel_shuffle(out, 2) + return out + +class BlazeBlock(nn.Module): + def __init__(self, in_channels,out_channels,mid_channels=None,stride=1): + super(BlazeBlock, self).__init__() + mid_channels = mid_channels or in_channels + assert stride in [1, 2] + if stride>1: + self.use_pool = True + else: + self.use_pool = False + + self.branch1 = nn.Sequential( + nn.Conv2d(in_channels=in_channels,out_channels=mid_channels,kernel_size=5,stride=stride,padding=2,groups=in_channels), + nn.BatchNorm2d(mid_channels), + 
nn.Conv2d(in_channels=mid_channels,out_channels=out_channels,kernel_size=1,stride=1), + nn.BatchNorm2d(out_channels), + ) + + if self.use_pool: + self.shortcut = nn.Sequential( + nn.MaxPool2d(kernel_size=stride, stride=stride), + nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1), + nn.BatchNorm2d(out_channels), + ) + + self.relu = nn.SiLU(inplace=True) + + def forward(self, x): + branch1 = self.branch1(x) + out = (branch1+self.shortcut(x)) if self.use_pool else (branch1+x) + return self.relu(out) + +class DoubleBlazeBlock(nn.Module): + def __init__(self,in_channels,out_channels,mid_channels=None,stride=1): + super(DoubleBlazeBlock, self).__init__() + mid_channels = mid_channels or in_channels + assert stride in [1, 2] + if stride > 1: + self.use_pool = True + else: + self.use_pool = False + + self.branch1 = nn.Sequential( + nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=5, stride=stride,padding=2,groups=in_channels), + nn.BatchNorm2d(in_channels), + nn.Conv2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1), + nn.BatchNorm2d(mid_channels), + nn.SiLU(inplace=True), + nn.Conv2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=5, stride=1,padding=2), + nn.BatchNorm2d(mid_channels), + nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1), + nn.BatchNorm2d(out_channels), + ) + + if self.use_pool: + self.shortcut = nn.Sequential( + nn.MaxPool2d(kernel_size=stride, stride=stride), + nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1), + nn.BatchNorm2d(out_channels), + ) + + self.relu = nn.SiLU(inplace=True) + + def forward(self, x): + branch1 = self.branch1(x) + out = (branch1 + self.shortcut(x)) if self.use_pool else (branch1 + x) + return self.relu(out) + + +class SPP(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13)): + super(SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Focus, self).__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. 
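SPPF above is the serial rewrite of SPP: three chained k=5 max-pools reproduce the parallel k=5/9/13 pools at lower cost. Note that the SPPF shown calls `warnings.catch_warnings()`, so `common.py` also needs an `import warnings` at the top. A self-contained equivalence check:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 8, 16, 16)

# SPP: parallel max-pools with k = 5, 9, 13
spp = torch.cat([x] + [nn.MaxPool2d(k, 1, k // 2)(x) for k in (5, 9, 13)], 1)

# SPPF: one k=5 pool applied three times in series
m = nn.MaxPool2d(5, 1, 2)
y1 = m(x)
y2 = m(y1)
sppf = torch.cat((x, y1, y2, m(y2)), 1)

print(torch.equal(spp, sppf))  # True: chained 5x5 max-pools equal 9x9 and 13x13
```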
x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super(Concat, self).__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class NMS(nn.Module): + # Non-Maximum Suppression (NMS) module + conf = 0.25 # confidence threshold + iou = 0.45 # IoU threshold + classes = None # (optional list) filter by class + + def __init__(self): + super(NMS, self).__init__() + + def forward(self, x): + return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + +class autoShape(nn.Module): + # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + img_size = 640 # inference size (pixels) + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + classes = None # (optional list) filter by class + + def __init__(self, model): + super(autoShape, self).__init__() + self.model = model.eval() + + def autoshape(self): + print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + return self + + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=720, width=1280, RGB images example inputs are: + # filename: imgs = 'data/samples/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(720,1280,3) + # numpy: = np.zeros((720,1280,3)) # HWC + # torch: = torch.zeros(16,3,720,1280) # BCHW + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
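`Contract` above is the space-to-depth reshape at the heart of `Focus`: 2x2 spatial blocks move into the channel dimension. A tiny shape check using the same view/permute sequence:

```python
import torch

x = torch.arange(16.).view(1, 1, 4, 4)  # N, C, H, W
s = 2
N, C, H, W = x.shape
y = x.view(N, C, H // s, s, W // s, s).permute(0, 3, 5, 1, 2, 4).contiguous()
print(y.view(N, C * s * s, H // s, W // s).shape)  # torch.Size([1, 4, 2, 2])
```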
# list of images + + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1 = [], [] # image and inference shapes + for i, im in enumerate(imgs): + if isinstance(im, str): # filename or uri + im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open + im = np.array(im) # to numpy + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + + # Inference + with torch.no_grad(): + y = self.model(x, augment, profile)[0] # forward + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + + # Post-process + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + return Detections(imgs, y, self.names) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, names=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) + + def display(self, pprint=False, show=False, save=False, render=False): + colors = color_list() + for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f'{n} {self.names[int(c)]}s, ' # add to string + if show or save or render: + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + for *box, conf, cls in pred: # xyxy, confidence, class + # str += '%s %.2f, ' % (names[int(cls)], conf) # label + ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot + if pprint: + print(str) + if show: + img.show(f'Image {i}') # show + if save: + f = f'results{i}.jpg' + str += f"saved to '{f}'" + img.save(f) # save + if render: + self.imgs[i] = np.asarray(img) + + def print(self): + self.display(pprint=True) # print results + + def show(self): + self.display(show=True) # show results + + def save(self): + self.display(save=True) # save results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def 
__len__(self): + return self.n + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + +class Classify(nn.Module): + # Classification head, i.e. x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/models/experimental.py b/models/experimental.py new file mode 100644 index 0000000..72dc877 --- /dev/null +++ b/models/experimental.py @@ -0,0 +1,133 @@ +# This file contains experimental modules + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k, s): + super(GhostBottleneck, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = 
len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, map_location=None): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True # pytorch 1.7.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print('Ensemble created with %s\n' % weights) + for k in ['names', 'stride']: + setattr(model, k, getattr(model[-1], k)) + return model # return ensemble diff --git a/models/yolo.py b/models/yolo.py new file mode 100644 index 0000000..4a285af --- /dev/null +++ b/models/yolo.py @@ -0,0 +1,351 @@ +import argparse +import logging +import math +import sys +from copy import deepcopy +from pathlib import Path + +import torch +import torch.nn as nn + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, C3, ShuffleV2Block, Concat, NMS, autoShape, StemBlock, BlazeBlock, DoubleBlazeBlock +from models.experimental import MixConv2d, CrossConv +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export_cat = False # onnx export cat output + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + #self.no = nc + 5 # number of outputs per anchor + self.no = nc + 5 + 8 # number of outputs per anchor + + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # 
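The `Detect` head above predicts `no = nc + 5 + 8` values per anchor. Spelled out as an index map, with `nc = 2` assumed here to match the single/double-layer plate class ids used in `get_plate_rec_landmark` earlier:

```python
import torch

nc = 2                           # single- vs double-layer plate (assumed)
pred = torch.zeros(nc + 5 + 8)   # one anchor's raw prediction vector
box = pred[0:4]                  # x, y, w, h
obj = pred[4:5]                  # objectness
landmarks = pred[5:13]           # x1 y1 x2 y2 x3 y3 x4 y4 (plate corners)
cls_scores = pred[13:13 + nc]    # class logits
print(pred.numel(), landmarks.numel(), cls_scores.numel())  # 15 8 2
```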
shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + # self.training=True + if self.export_cat: + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + # self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + self.grid[i], self.anchor_grid[i] = self._make_grid_new(nx, ny,i) + + y = torch.full_like(x[i], 0) + y = y + torch.cat((x[i][:, :, :, :, 0:5].sigmoid(), torch.cat((x[i][:, :, :, :, 5:13], x[i][:, :, :, :, 13:13+self.nc].sigmoid()), 4)), 4) + + box_xy = (y[:, :, :, :, 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy + box_wh = (y[:, :, :, :, 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + # box_conf = torch.cat((box_xy, torch.cat((box_wh, y[:, :, :, :, 4:5]), 4)), 4) + + landm1 = y[:, :, :, :, 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x1 y1 + landm2 = y[:, :, :, :, 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x2 y2 + landm3 = y[:, :, :, :, 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x3 y3 + landm4 = y[:, :, :, :, 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x4 y4 + prob= y[:, :, :, :, 13:13+self.nc] + score,index_ = torch.max(prob,dim=-1,keepdim=True) + score=score.type(box_xy.dtype) + index_=index_.type(box_xy.dtype) + index =torch.argmax(prob,dim=-1,keepdim=True).type(box_xy.dtype) + # landm5 = y[:, :, :, :, 13:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x5 y5 + # landm = torch.cat((landm1, torch.cat((landm2, torch.cat((landm3, torch.cat((landm4, landm5), 4)), 4)), 4)), 4) + # y = torch.cat((box_conf, torch.cat((landm, y[:, :, :, :, 13:13+self.nc]), 4)), 4) + y = torch.cat([box_xy, box_wh, y[:, :, :, :, 4:5], landm1, landm2, landm3, landm4, y[:, :, :, :, 13:13+self.nc]], -1) + + z.append(y.view(bs, -1, self.no)) + return torch.cat(z, 1) + + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = torch.full_like(x[i], 0) + class_range = list(range(5)) + list(range(13,13+self.nc)) + y[..., class_range] = x[i][..., class_range].sigmoid() + y[..., 5:13] = x[i][..., 5:13] + #y = x[i].sigmoid() + + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + + #y[..., 5:13] = y[..., 5:13] * 8 - 4 + y[..., 5:7] = y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] # landmark x1 y1 + y[..., 7:9] = y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x2 y2 + y[..., 9:11] = y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x3 y3 + y[..., 11:13] = y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x4 y4 + # y[..., 13:13] = y[..., 13:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]# landmark x5 y5 + + #y[..., 5:7] = (y[..., 5:7] * 2 -1) * self.anchor_grid[i] # landmark x1 y1 + #y[..., 7:9] = (y[..., 7:9] * 2 -1) * self.anchor_grid[i] # landmark x2 y2 + #y[..., 9:11] = (y[..., 9:11] * 2 -1) * self.anchor_grid[i] # landmark x3 y3 + #y[..., 11:13] = (y[..., 11:13] * 2 -1) * self.anchor_grid[i] # landmark x4 y4 + #y[..., 13:13] = (y[..., 13:13] * 2 -1) * self.anchor_grid[i] # landmark x5 y5 + + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def _make_grid_new(self,nx=20, ny=20,i=0): + d = self.anchors[i].device + if '1.10.0' in torch.__version__: # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij') + else: + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) + grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() + anchor_grid = (self.anchors[i].clone() * self.stride[i]).view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() + return grid, anchor_grid +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc)) + self.yaml['nc'] = nc # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 128 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for 
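The decoding above in isolation: xy uses 2·sigmoid − 0.5 plus the grid cell (covering −0.5..1.5 cells), wh uses (2·sigmoid)² times the anchor (covering 0..4 anchor sizes), and landmarks are linear offsets scaled by the anchor. A one-cell check:

```python
import torch

t = torch.tensor([0.2, -0.3, 0.1, 0.4])          # raw tx, ty, tw, th
grid_xy = torch.tensor([7.0, 4.0])               # cell indices
stride, anchor_wh = 8.0, torch.tensor([10.0, 13.0])

xy = (t[:2].sigmoid() * 2.0 - 0.5 + grid_xy) * stride
wh = (t[2:].sigmoid() * 2.0) ** 2 * anchor_wh
print(xy.tolist(), wh.tolist())
```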
si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + elif type(m) is nn.Upsample: + m.recompute_scale_factor = None # torch 1.11.0 compatibility + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... 
') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, ShuffleV2Block, StemBlock, BlazeBlock, DoubleBlazeBlock]: + c1, c2 = ch[f], args[0] + + # Normal + # if i > 0 and args[0] != no: # channel expansion factor + # ex = 1.75 # exponential (default 2.0) + # e = math.log(c2 / ch[1]) / math.log(2) + # c2 = int(ch[1] * ex ** e) + # if m != Focus: + + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + # Experimental + # if i > 0 and args[0] != no: # channel expansion factor + # ex = 1 + gw # exponential (default 2.0) + # ch1 = 32 # ch[1] + # e = math.log(c2 / ch1) / math.log(2) # level 1-n + # c2 = int(ch1 * ex ** e) + # if m != Focus: + # c2 = make_divisible(c2, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +from thop import profile +from thop import clever_format +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + stride = model.stride.max() + if stride == 32: + input = torch.Tensor(1, 3, 480, 640).to(device) + else: + input = torch.Tensor(1, 3, 512, 640).to(device) + model.train() + print(model) + flops, params = profile(model, inputs=(input, )) + flops, params = clever_format([flops, params], "%.3f") + print('Flops:', flops, ',Params:' ,params) diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml new file mode 100644 index 0000000..5c83025 --- /dev/null +++ b/models/yolov5l.yaml @@ -0,0 +1,47 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [4,5, 8,10, 13,16] # P3/8 + - [23,29, 43,55, 73,105] # P4/16 + - [146,217, 231,300, 335,433] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [64, 3, 2]], # 0-P1/2 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 2-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 4-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 6-P5/32 + [-1, 1, SPP, [1024, [3,5,7]]], + [-1, 3, C3, [1024, False]], # 8 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 5], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 12 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 3], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 16 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 19 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 9], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 22 (P5/32-large) + + [[16, 19, 22], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5l6.yaml b/models/yolov5l6.yaml new file mode 100644 index 0000000..0722e20 --- /dev/null +++ b/models/yolov5l6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [6,7, 9,11, 13,16] # P3/8 + - [18,23, 26,33, 37,47] # P4/16 + - [54,67, 77,104, 112,154] # P5/32 + - [174,238, 258,355, 445,568] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ], # 0-P1/2 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 2-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 4-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 6-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 8-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 10 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 7 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 14 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 5 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 18 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 3 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 22 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 19 ], 1, 
Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 25 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 15 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 28 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 11 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 31 (P6/64-xlarge) + + [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] + diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml new file mode 100644 index 0000000..eab694c --- /dev/null +++ b/models/yolov5m.yaml @@ -0,0 +1,47 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [4,5, 8,10, 13,16] # P3/8 + - [23,29, 43,55, 73,105] # P4/16 + - [146,217, 231,300, 335,433] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [64, 3, 2]], # 0-P1/2 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 2-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 4-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 6-P5/32 + [-1, 1, SPP, [1024, [3,5,7]]], + [-1, 3, C3, [1024, False]], # 8 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 5], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 12 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 3], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 16 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 19 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 9], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 22 (P5/32-large) + + [[16, 19, 22], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5m6.yaml b/models/yolov5m6.yaml new file mode 100644 index 0000000..9bf4622 --- /dev/null +++ b/models/yolov5m6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [6,7, 9,11, 13,16] # P3/8 + - [18,23, 26,33, 37,47] # P4/16 + - [54,67, 77,104, 112,154] # P5/32 + - [174,238, 258,355, 445,568] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ], # 0-P1/2 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 2-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 4-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 6-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 8-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 10 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 7 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 14 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 5 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 18 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 3 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 22 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 19 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 25 (P4/16-medium) + + [ 
-1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 15 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 28 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 11 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 31 (P6/64-xlarge) + + [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] + diff --git a/models/yolov5n-0.5.yaml b/models/yolov5n-0.5.yaml new file mode 100644 index 0000000..49eda1d --- /dev/null +++ b/models/yolov5n-0.5.yaml @@ -0,0 +1,46 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 0.5 # layer channel multiple + +# anchors +anchors: + - [4,5, 8,10, 13,16] # P3/8 + - [23,29, 43,55, 73,105] # P4/16 + - [146,217, 231,300, 335,433] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [32, 3, 2]], # 0-P2/4 + [-1, 1, ShuffleV2Block, [128, 2]], # 1-P3/8 + [-1, 3, ShuffleV2Block, [128, 1]], # 2 + [-1, 1, ShuffleV2Block, [256, 2]], # 3-P4/16 + [-1, 7, ShuffleV2Block, [256, 1]], # 4 + [-1, 1, ShuffleV2Block, [512, 2]], # 5-P5/32 + [-1, 3, ShuffleV2Block, [512, 1]], # 6 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P4 + [-1, 1, C3, [128, False]], # 10 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P3 + [-1, 1, C3, [128, False]], # 14 (P3/8-small) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 11], 1, Concat, [1]], # cat head P4 + [-1, 1, C3, [128, False]], # 17 (P4/16-medium) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 7], 1, Concat, [1]], # cat head P5 + [-1, 1, C3, [128, False]], # 20 (P5/32-large) + + [[14, 17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] + diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml new file mode 100644 index 0000000..f36c764 --- /dev/null +++ b/models/yolov5n.yaml @@ -0,0 +1,46 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [4,5, 8,10, 13,16] # P3/8 + - [23,29, 43,55, 73,105] # P4/16 + - [146,217, 231,300, 335,433] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [32, 3, 2]], # 0-P2/4 + [-1, 1, ShuffleV2Block, [128, 2]], # 1-P3/8 + [-1, 3, ShuffleV2Block, [128, 1]], # 2 + [-1, 1, ShuffleV2Block, [256, 2]], # 3-P4/16 + [-1, 7, ShuffleV2Block, [256, 1]], # 4 + [-1, 1, ShuffleV2Block, [512, 2]], # 5-P5/32 + [-1, 3, ShuffleV2Block, [512, 1]], # 6 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P4 + [-1, 1, C3, [128, False]], # 10 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P3 + [-1, 1, C3, [128, False]], # 14 (P3/8-small) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 11], 1, Concat, [1]], # cat head P4 + [-1, 1, C3, [128, False]], # 17 (P4/16-medium) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 7], 1, Concat, [1]], # cat head P5 + [-1, 1, C3, [128, False]], # 20 (P5/32-large) + + [[14, 17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] + diff --git a/models/yolov5n6.yaml b/models/yolov5n6.yaml new file mode 100644 index 0000000..50f7d26 --- /dev/null +++ b/models/yolov5n6.yaml @@ -0,0 +1,58 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 1.0 # model depth multiple 
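+# Note (sketch): parse_model in models/yolo.py (shown earlier in this diff)
+# consumes the two multiples in this file when building the network:
+# per-stage repeat counts become n = max(round(n * depth_multiple), 1) and
+# channel counts become c2 = make_divisible(c2 * width_multiple, 8).
+# With 1.0/1.0 as here the yaml is used as written; yolov5s (0.33/0.50)
+# would shrink a 9-repeat, 512-channel C3 stage to 3 repeats and 256 channels.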
+width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [6,7, 9,11, 13,16] # P3/8 + - [18,23, 26,33, 37,47] # P4/16 + - [54,67, 77,104, 112,154] # P5/32 + - [174,238, 258,355, 445,568] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [32, 3, 2]], # 0-P2/4 + [-1, 1, ShuffleV2Block, [128, 2]], # 1-P3/8 + [-1, 3, ShuffleV2Block, [128, 1]], # 2 + [-1, 1, ShuffleV2Block, [256, 2]], # 3-P4/16 + [-1, 7, ShuffleV2Block, [256, 1]], # 4 + [-1, 1, ShuffleV2Block, [384, 2]], # 5-P5/32 + [-1, 3, ShuffleV2Block, [384, 1]], # 6 + [-1, 1, ShuffleV2Block, [512, 2]], # 7-P6/64 + [-1, 3, ShuffleV2Block, [512, 1]], # 8 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P5 + [-1, 1, C3, [128, False]], # 12 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P4 + [-1, 1, C3, [128, False]], # 16 (P4/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P3 + [-1, 1, C3, [128, False]], # 20 (P3/8-small) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 17], 1, Concat, [1]], # cat head P4 + [-1, 1, C3, [128, False]], # 23 (P4/16-medium) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat head P5 + [-1, 1, C3, [128, False]], # 26 (P5/32-large) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 9], 1, Concat, [1]], # cat head P6 + [-1, 1, C3, [128, False]], # 29 (P6/64-large) + + [[20, 23, 26, 29], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] + diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml new file mode 100644 index 0000000..3e27b3b --- /dev/null +++ b/models/yolov5s.yaml @@ -0,0 +1,47 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.5 # layer channel multiple + +# anchors +anchors: + - [4,5, 8,10, 13,16] # P3/8 + - [23,29, 43,55, 73,105] # P4/16 + - [146,217, 231,300, 335,433] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, StemBlock, [64, 3, 2]], # 0-P1/2 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 2-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 4-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 6-P5/32 + [-1, 1, SPP, [1024, [3,5,7]]], + [-1, 3, C3, [1024, False]], # 8 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 5], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 12 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 3], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 16 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 19 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 9], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 22 (P5/32-large) + + [[16, 19, 22], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5s6.yaml b/models/yolov5s6.yaml new file mode 100644 index 0000000..46e2164 --- /dev/null +++ b/models/yolov5s6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 1 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [6,7, 9,11, 13,16] # P3/8 + - [18,23, 26,33, 37,47] # P4/16 + - [54,67, 77,104, 112,154] # P5/32 + - [174,238, 258,355, 445,568] # 
P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, StemBlock, [ 64, 3, 2 ] ], # 0-P1/2 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 2-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 4-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 6-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 8-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 10 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 7 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 14 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 5 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 18 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 3 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 22 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 19 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 25 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 15 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 28 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 11 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 31 (P6/64-xlarge) + + [ [ 22, 25, 28, 31 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] + diff --git a/onnx_infer.py b/onnx_infer.py new file mode 100644 index 0000000..0016639 --- /dev/null +++ b/onnx_infer.py @@ -0,0 +1,256 @@ +import onnxruntime +import numpy as np +import cv2 +import copy +import os +import argparse +from PIL import Image, ImageDraw, ImageFont +import time +plate_color_list=['黑色','蓝色','绿色','白色','黄色'] +plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" +mean_value,std_value=((0.588,0.193))#识别模型均值标准差 + +def decodePlate(preds): #识别后处理 + pre=0 + newPreds=[] + for i in range(len(preds)): + if preds[i]!=0 and preds[i]!=pre: + newPreds.append(preds[i]) + pre=preds[i] + plate="" + for i in newPreds: + plate+=plateName[int(i)] + return plate + # return newPreds + +def rec_pre_precessing(img,size=(48,168)): #识别前处理 + img =cv2.resize(img,(168,48)) + img = img.astype(np.float32) + img = (img/255-mean_value)/std_value #归一化 减均值 除标准差 + img = img.transpose(2,0,1) #h,w,c 转为 c,h,w + img = img.reshape(1,*img.shape) #channel,height,width转为batch,channel,height,channel + return img + +def get_plate_result(img,session_rec): #识别后处理 + img =rec_pre_precessing(img) + y_onnx_plate,y_onnx_color = session_rec.run([session_rec.get_outputs()[0].name,session_rec.get_outputs()[1].name], {session_rec.get_inputs()[0].name: img}) + index =np.argmax(y_onnx_plate,axis=-1) + index_color = np.argmax(y_onnx_color) + plate_color = plate_color_list[index_color] + # print(y_onnx[0]) + plate_no = decodePlate(index[0]) + return plate_no,plate_color + + +def allFilePath(rootPath,allFIleList): #遍历文件 + fileList = os.listdir(rootPath) + for temp in fileList: + if os.path.isfile(os.path.join(rootPath,temp)): + allFIleList.append(os.path.join(rootPath,temp)) + else: + allFilePath(os.path.join(rootPath,temp),allFIleList) + +def get_split_merge(img): #双层车牌进行分割后识别 + h,w,c = img.shape + img_upper = img[0:int(5/12*h),:] + img_lower = img[int(1/3*h):,:] + img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) + new_img = 
np.hstack((img_upper,img_lower)) + return new_img + + +def order_points(pts): # 关键点排列 按照(左上,右上,右下,左下)的顺序排列 + rect = np.zeros((4, 2), dtype = "float32") + s = pts.sum(axis = 1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + diff = np.diff(pts, axis = 1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + return rect + + +def four_point_transform(image, pts): #透视变换得到矫正后的图像,方便识别 + rect = order_points(pts) + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype = "float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + + # return the warped image + return warped + +def my_letter_box(img,size=(640,640)): # + h,w,c = img.shape + r = min(size[0]/h,size[1]/w) + new_h,new_w = int(h*r),int(w*r) + top = int((size[0]-new_h)/2) + left = int((size[1]-new_w)/2) + + bottom = size[0]-new_h-top + right = size[1]-new_w-left + img_resize = cv2.resize(img,(new_w,new_h)) + img = cv2.copyMakeBorder(img_resize,top,bottom,left,right,borderType=cv2.BORDER_CONSTANT,value=(114,114,114)) + return img,r,left,top + +def xywh2xyxy(boxes): #xywh坐标变为 左上 ,右下坐标 x1,y1 x2,y2 + xywh =copy.deepcopy(boxes) + xywh[:,0]=boxes[:,0]-boxes[:,2]/2 + xywh[:,1]=boxes[:,1]-boxes[:,3]/2 + xywh[:,2]=boxes[:,0]+boxes[:,2]/2 + xywh[:,3]=boxes[:,1]+boxes[:,3]/2 + return xywh + +def my_nms(boxes,iou_thresh): #nms + index = np.argsort(boxes[:,4])[::-1] + keep = [] + while index.size >0: + i = index[0] + keep.append(i) + x1=np.maximum(boxes[i,0],boxes[index[1:],0]) + y1=np.maximum(boxes[i,1],boxes[index[1:],1]) + x2=np.minimum(boxes[i,2],boxes[index[1:],2]) + y2=np.minimum(boxes[i,3],boxes[index[1:],3]) + + w = np.maximum(0,x2-x1) + h = np.maximum(0,y2-y1) + + inter_area = w*h + union_area = (boxes[i,2]-boxes[i,0])*(boxes[i,3]-boxes[i,1])+(boxes[index[1:],2]-boxes[index[1:],0])*(boxes[index[1:],3]-boxes[index[1:],1]) + iou = inter_area/(union_area-inter_area) + idx = np.where(iou<=iou_thresh)[0] + index = index[idx+1] + return keep + +def restore_box(boxes,r,left,top): #返回原图上面的坐标 + boxes[:,[0,2,5,7,9,11]]-=left + boxes[:,[1,3,6,8,10,12]]-=top + + boxes[:,[0,2,5,7,9,11]]/=r + boxes[:,[1,3,6,8,10,12]]/=r + return boxes + +def detect_pre_precessing(img,img_size): #检测前处理 + img,r,left,top=my_letter_box(img,img_size) + # cv2.imwrite("1.jpg",img) + img =img[:,:,::-1].transpose(2,0,1).copy().astype(np.float32) + img=img/255 + img=img.reshape(1,*img.shape) + return img,r,left,top + +def post_precessing(dets,r,left,top,conf_thresh=0.3,iou_thresh=0.5):#检测后处理 + choice = dets[:,:,4]>conf_thresh + dets=dets[choice] + dets[:,13:15]*=dets[:,4:5] + box = dets[:,:4] + boxes = xywh2xyxy(box) + score= np.max(dets[:,13:15],axis=-1,keepdims=True) + index = np.argmax(dets[:,13:15],axis=-1).reshape(-1,1) + output = np.concatenate((boxes,score,dets[:,5:13],index),axis=1) + reserve_=my_nms(output,iou_thresh) + output=output[reserve_] + output = restore_box(output,r,left,top) + return output + +def rec_plate(outputs,img0,session_rec): #识别车牌 + dict_list=[] + for output in outputs: + result_dict={} + rect=output[:4].tolist() + land_marks = 
output[5:13].reshape(4,2) + roi_img = four_point_transform(img0,land_marks) + label = int(output[-1]) + score = output[4] + if label==1: #代表是双层车牌 + roi_img = get_split_merge(roi_img) + plate_no,plate_color = get_plate_result(roi_img,session_rec) + result_dict['rect']=rect + result_dict['landmarks']=land_marks.tolist() + result_dict['plate_no']=plate_no + result_dict['roi_height']=roi_img.shape[0] + result_dict['plate_color']=plate_color + dict_list.append(result_dict) + return dict_list + +def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): #将识别结果画在图上 + if (isinstance(img, np.ndarray)): #判断是否OpenCV图片类型 + img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) + draw = ImageDraw.Draw(img) + fontText = ImageFont.truetype( + "fonts/platech.ttf", textSize, encoding="utf-8") + draw.text((left, top), text, textColor, font=fontText) + return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) + +def draw_result(orgimg,dict_list): + result_str ="" + for result in dict_list: + rect_area = result['rect'] + + x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] + padding_w = 0.05*w + padding_h = 0.11*h + rect_area[0]=max(0,int(x-padding_w)) + rect_area[1]=min(orgimg.shape[1],int(y-padding_h)) + rect_area[2]=max(0,int(rect_area[2]+padding_w)) + rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) + + height_area = result['roi_height'] + landmarks=result['landmarks'] + result = result['plate_no'] + result_str+=result+" " + for i in range(4): #关键点 + cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) + cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(255,255,0),2) #画框 + if len(result)>=1: + orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) + print(result_str) + return orgimg + +if __name__ == "__main__": + begin = time.time() + parser = argparse.ArgumentParser() + parser.add_argument('--detect_model',type=str, default=r'weights/plate_detect.onnx', help='model.pt path(s)') #检测模型 + parser.add_argument('--rec_model', type=str, default='weights/plate_rec_color.onnx', help='model.pt path(s)')#识别模型 + parser.add_argument('--image_path', type=str, default='imgs', help='source') + parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--output', type=str, default='result1', help='source') + opt = parser.parse_args() + file_list = [] + allFilePath(opt.image_path,file_list) + providers = ['CPUExecutionProvider'] + clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] + img_size = (opt.img_size,opt.img_size) + session_detect = onnxruntime.InferenceSession(opt.detect_model, providers=providers ) + session_rec = onnxruntime.InferenceSession(opt.rec_model, providers=providers ) + if not os.path.exists(opt.output): + os.mkdir(opt.output) + save_path = opt.output + count = 0 + for pic_ in file_list: + count+=1 + print(count,pic_,end=" ") + img=cv2.imread(pic_) + img0 = copy.deepcopy(img) + img,r,left,top = detect_pre_precessing(img,img_size) #检测前处理 + # print(img.shape) + y_onnx = session_detect.run([session_detect.get_outputs()[0].name], {session_detect.get_inputs()[0].name: img})[0] + outputs = post_precessing(y_onnx,r,left,top) #检测后处理 + result_list=rec_plate(outputs,img0,session_rec) + ori_img = draw_result(img0,result_list) + img_name = os.path.basename(pic_) + save_img_path = os.path.join(save_path,img_name) + cv2.imwrite(save_img_path,ori_img) + 
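+    # Per-image pipeline recap: detect_pre_precessing letterboxes to 640x640 and
+    # builds a normalized NCHW float blob; the detection session returns raw rows
+    # of (cx, cy, w, h, obj, 8 landmark coords, 2 class scores); post_precessing
+    # confidence-filters, runs NMS and maps boxes/landmarks back onto the original
+    # image via restore_box; rec_plate then rectifies each plate with
+    # four_point_transform (splitting and re-merging double-layer plates first)
+    # and feeds the crop to the recognition session.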
print(f"总共耗时{time.time()-begin} s") + + + \ No newline at end of file diff --git a/openvino_infer.py b/openvino_infer.py new file mode 100644 index 0000000..032c65e --- /dev/null +++ b/openvino_infer.py @@ -0,0 +1,342 @@ +import cv2 +import matplotlib.pyplot as plt +import numpy as np +from openvino.runtime import Core +import os +import time +import copy +from PIL import Image, ImageDraw, ImageFont +import argparse + +def cv_imread(path): + img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1) + return img + +def allFilePath(rootPath,allFIleList): + fileList = os.listdir(rootPath) + for temp in fileList: + if os.path.isfile(os.path.join(rootPath,temp)): + # if temp.endswith("jpg"): + allFIleList.append(os.path.join(rootPath,temp)) + else: + allFilePath(os.path.join(rootPath,temp),allFIleList) + +mean_value,std_value=((0.588,0.193))#识别模型均值标准差 +plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" + +def rec_pre_precessing(img,size=(48,168)): #识别前处理 + img =cv2.resize(img,(168,48)) + img = img.astype(np.float32) + img = (img/255-mean_value)/std_value + img = img.transpose(2,0,1) + img = img.reshape(1,*img.shape) + return img + +def decodePlate(preds): #识别后处理 + pre=0 + newPreds=[] + preds=preds.astype(np.int8)[0] + for i in range(len(preds)): + if preds[i]!=0 and preds[i]!=pre: + newPreds.append(preds[i]) + pre=preds[i] + plate="" + for i in newPreds: + plate+=plateName[int(i)] + return plate + +def load_model(onnx_path): + ie = Core() + model_onnx = ie.read_model(model=onnx_path) + compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU") + output_layer_onnx = compiled_model_onnx.output(0) + return compiled_model_onnx,output_layer_onnx + +def get_plate_result(img,rec_model,rec_output): + img =rec_pre_precessing(img) + # time_b = time.time() + res_onnx = rec_model([img])[rec_output] + # time_e= time.time() + index =np.argmax(res_onnx,axis=-1) #找出最大概率的那个字符的序号 + plate_no = decodePlate(index) + # print(f'{plate_no},time is {time_e-time_b}') + return plate_no + + +def get_split_merge(img): #双层车牌进行分割后识别 + h,w,c = img.shape + img_upper = img[0:int(5/12*h),:] + img_lower = img[int(1/3*h):,:] + img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) + new_img = np.hstack((img_upper,img_lower)) + return new_img + + +def order_points(pts): + rect = np.zeros((4, 2), dtype = "float32") + s = pts.sum(axis = 1) + rect[0] = pts[np.argmin(s)] + rect[2] = pts[np.argmax(s)] + diff = np.diff(pts, axis = 1) + rect[1] = pts[np.argmin(diff)] + rect[3] = pts[np.argmax(diff)] + return rect + + +def four_point_transform(image, pts): + rect = order_points(pts) + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype = "float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + + # return the warped image + return warped + +def my_letter_box(img,size=(640,640)): + h,w,c = img.shape + r = min(size[0]/h,size[1]/w) + new_h,new_w = int(h*r),int(w*r) + top = int((size[0]-new_h)/2) + left = int((size[1]-new_w)/2) + + bottom = size[0]-new_h-top + right = 
size[1]-new_w-left + img_resize = cv2.resize(img,(new_w,new_h)) + img = cv2.copyMakeBorder(img_resize,top,bottom,left,right,borderType=cv2.BORDER_CONSTANT,value=(114,114,114)) + return img,r,left,top + +def xywh2xyxy(boxes): + xywh =copy.deepcopy(boxes) + xywh[:,0]=boxes[:,0]-boxes[:,2]/2 + xywh[:,1]=boxes[:,1]-boxes[:,3]/2 + xywh[:,2]=boxes[:,0]+boxes[:,2]/2 + xywh[:,3]=boxes[:,1]+boxes[:,3]/2 + return xywh + +def my_nms(boxes,iou_thresh): + index = np.argsort(boxes[:,4])[::-1] + keep = [] + while index.size >0: + i = index[0] + keep.append(i) + x1=np.maximum(boxes[i,0],boxes[index[1:],0]) + y1=np.maximum(boxes[i,1],boxes[index[1:],1]) + x2=np.minimum(boxes[i,2],boxes[index[1:],2]) + y2=np.minimum(boxes[i,3],boxes[index[1:],3]) + + w = np.maximum(0,x2-x1) + h = np.maximum(0,y2-y1) + + inter_area = w*h + union_area = (boxes[i,2]-boxes[i,0])*(boxes[i,3]-boxes[i,1])+(boxes[index[1:],2]-boxes[index[1:],0])*(boxes[index[1:],3]-boxes[index[1:],1]) + iou = inter_area/(union_area-inter_area) + idx = np.where(iou<=iou_thresh)[0] + index = index[idx+1] + return keep + +def restore_box(boxes,r,left,top): + boxes[:,[0,2,5,7,9,11]]-=left + boxes[:,[1,3,6,8,10,12]]-=top + + boxes[:,[0,2,5,7,9,11]]/=r + boxes[:,[1,3,6,8,10,12]]/=r + return boxes + +def detect_pre_precessing(img,img_size): + img,r,left,top=my_letter_box(img,img_size) + # cv2.imwrite("1.jpg",img) + img =img[:,:,::-1].transpose(2,0,1).copy().astype(np.float32) + img=img/255 + img=img.reshape(1,*img.shape) + return img,r,left,top + +def post_precessing(dets,r,left,top,conf_thresh=0.3,iou_thresh=0.5):#检测后处理 + choice = dets[:,:,4]>conf_thresh + dets=dets[choice] + dets[:,13:15]*=dets[:,4:5] + box = dets[:,:4] + boxes = xywh2xyxy(box) + score= np.max(dets[:,13:15],axis=-1,keepdims=True) + index = np.argmax(dets[:,13:15],axis=-1).reshape(-1,1) + output = np.concatenate((boxes,score,dets[:,5:13],index),axis=1) + reserve_=my_nms(output,iou_thresh) + output=output[reserve_] + output = restore_box(output,r,left,top) + return output + +def rec_plate(outputs,img0,rec_model,rec_output): + dict_list=[] + for output in outputs: + result_dict={} + rect=output[:4].tolist() + land_marks = output[5:13].reshape(4,2) + roi_img = four_point_transform(img0,land_marks) + label = int(output[-1]) + if label==1: #代表是双层车牌 + roi_img = get_split_merge(roi_img) + plate_no = get_plate_result(roi_img,rec_model,rec_output) #得到车牌识别结果 + result_dict['rect']=rect + result_dict['landmarks']=land_marks.tolist() + result_dict['plate_no']=plate_no + result_dict['roi_height']=roi_img.shape[0] + dict_list.append(result_dict) + return dict_list + + + +def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): + if (isinstance(img, np.ndarray)): #判断是否OpenCV图片类型 + img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) + draw = ImageDraw.Draw(img) + fontText = ImageFont.truetype( + "fonts/platech.ttf", textSize, encoding="utf-8") + draw.text((left, top), text, textColor, font=fontText) + return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) + +def draw_result(orgimg,dict_list): + result_str ="" + for result in dict_list: + rect_area = result['rect'] + + x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] + padding_w = 0.05*w + padding_h = 0.11*h + rect_area[0]=max(0,int(x-padding_w)) + rect_area[1]=min(orgimg.shape[1],int(y-padding_h)) + rect_area[2]=max(0,int(rect_area[2]+padding_w)) + rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) + + height_area = result['roi_height'] + landmarks=result['landmarks'] + result 
= result['plate_no'] + result_str+=result+" " + # for i in range(4): #关键点 + # cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) + + if len(result)>=6: + cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2) #画框 + orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) + # print(result_str) + return orgimg + +def get_second(capture): + if capture.isOpened(): + rate = capture.get(5) # 帧速率 + FrameNumber = capture.get(7) # 视频文件的帧数 + duration = FrameNumber/rate # 帧速率/视频总帧数 是时间,除以60之后单位是分钟 + return int(rate),int(FrameNumber),int(duration) + +if __name__=="__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--detect_model',type=str, default=r'weights/plate_detect.onnx', help='model.pt path(s)') #检测模型 + parser.add_argument('--rec_model', type=str, default='weights/plate_rec.onnx', help='model.pt path(s)')#识别模型 + parser.add_argument('--image_path', type=str, default='imgs', help='source') + parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--output', type=str, default='result1', help='source') + opt = parser.parse_args() + file_list=[] + file_folder=opt.image_path + allFilePath(file_folder,file_list) + rec_onnx_path =opt.rec_model + detect_onnx_path=opt.detect_model + rec_model,rec_output=load_model(rec_onnx_path) + detect_model,detect_output=load_model(detect_onnx_path) + count=0 + img_size=(opt.img_size,opt.img_size) + begin=time.time() + save_path=opt.output + if not os.path.exists(save_path): + os.mkdir(save_path) + for pic_ in file_list: + + count+=1 + print(count,pic_,end=" ") + img=cv2.imread(pic_) + time_b = time.time() + if img.shape[-1]==4: + img = cv2.cvtColor(img,cv2.COLOR_BGRA2BGR) + img0 = copy.deepcopy(img) + img,r,left,top = detect_pre_precessing(img,img_size) #检测前处理 + # print(img.shape) + det_result = detect_model([img])[detect_output] + outputs = post_precessing(det_result,r,left,top) #检测后处理 + time_1 = time.time() + result_list=rec_plate(outputs,img0,rec_model,rec_output) + time_e= time.time() + print(f'耗时 {time_e-time_b} s') + ori_img = draw_result(img0,result_list) + img_name = os.path.basename(pic_) + save_img_path = os.path.join(save_path,img_name) + + cv2.imwrite(save_img_path,ori_img) +print(f"总共耗时{time.time()-begin} s") + + # video_name = r"plate.mp4" + # capture=cv2.VideoCapture(video_name) + # fourcc = cv2.VideoWriter_fourcc(*'MP4V') + # fps = capture.get(cv2.CAP_PROP_FPS) # 帧数 + # width, height = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 宽高 + # out = cv2.VideoWriter('2result.mp4', fourcc, fps, (width, height)) # 写入视频 + # frame_count = 0 + # fps_all=0 + # rate,FrameNumber,duration=get_second(capture) + # # with open("example.csv",mode='w',newline='') as example_file: + # # fieldnames = ['车牌', '时间'] + # # writer = csv.DictWriter(example_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) + # # writer.writeheader() + # if capture.isOpened(): + # while True: + # t1 = cv2.getTickCount() + # frame_count+=1 + # ret,img=capture.read() + # if not ret: + # break + # # if frame_count%rate==0: + # img0 = copy.deepcopy(img) + # img,r,left,top = detect_pre_precessing(img,img_size) #检测前处理 + # # print(img.shape) + # det_result = detect_model([img])[detect_output] + # outputs = post_precessing(det_result,r,left,top) #检测后处理 + # result_list=rec_plate(outputs,img0,rec_model,rec_output) + # ori_img = 
draw_result(img0,result_list) + # t2 =cv2.getTickCount() + # infer_time =(t2-t1)/cv2.getTickFrequency() + # fps=1.0/infer_time + # fps_all+=fps + # str_fps = f'fps:{fps:.4f}' + # out.write(ori_img) + # cv2.putText(ori_img,str_fps,(20,20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2) + # cv2.imshow("haha",ori_img) + # cv2.waitKey(1) + + # # current_time = int(frame_count/FrameNumber*duration) + # # sec = current_time%60 + # # minute = current_time//60 + # # for result_ in result_list: + # # plate_no = result_['plate_no'] + # # if not is_car_number(pattern_str,plate_no): + # # continue + # # print(f'车牌号:{plate_no},时间:{minute}分{sec}秒') + # # time_str =f'{minute}分{sec}秒' + # # writer.writerow({"车牌":plate_no,"时间":time_str}) + # # out.write(ori_img) + + + # else: + # print("失败") + # capture.release() + # out.release() + # cv2.destroyAllWindows() + # print(f"all frame is {frame_count},average fps is {fps_all/frame_count}") + diff --git a/plate_recognition/double_plate_split_merge.py b/plate_recognition/double_plate_split_merge.py new file mode 100644 index 0000000..24c6537 --- /dev/null +++ b/plate_recognition/double_plate_split_merge.py @@ -0,0 +1,15 @@ +import os +import cv2 +import numpy as np +def get_split_merge(img): + h,w,c = img.shape + img_upper = img[0:int(5/12*h),:] + img_lower = img[int(1/3*h):,:] + img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) + new_img = np.hstack((img_upper,img_lower)) + return new_img + +if __name__=="__main__": + img = cv2.imread("double_plate/tmp8078.png") + new_img =get_split_merge(img) + cv2.imwrite("double_plate/new.jpg",new_img) diff --git a/plate_recognition/plateNet.py b/plate_recognition/plateNet.py new file mode 100644 index 0000000..ce9a982 --- /dev/null +++ b/plate_recognition/plateNet.py @@ -0,0 +1,203 @@ +import torch.nn as nn +import torch + + +class myNet_ocr(nn.Module): + def __init__(self,cfg=None,num_classes=78,export=False): + super(myNet_ocr, self).__init__() + if cfg is None: + cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256] + # cfg =[32,32,'M',64,64,'M',128,128,'M',256,256] + self.feature = self.make_layers(cfg, True) + self.export = export + # self.classifier = nn.Linear(cfg[-1], num_classes) + # self.loc = nn.MaxPool2d((2, 2), (5, 1), (0, 1),ceil_mode=True) + # self.loc = nn.AvgPool2d((2, 2), (5, 2), (0, 1),ceil_mode=False) + self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False) + self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1) + # self.newBn=nn.BatchNorm2d(num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + x=self.loc(x) + x=self.newCnn(x) + # x=self.newBn(x) + if self.export: + conv = x.squeeze(2) # b *512 * width + conv = conv.transpose(2,1) # [w, b, c] + # conv =conv.argmax(dim=2) + return conv + else: + b, c, h, w = x.size() + assert h == 1, "the height of conv must be 1" + 
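+            # Shape walk-through (a sketch, assuming the default cfg and the
+            # 48x168 crops used by plate_rec.py): self.feature yields
+            # (b, 256, 5, 20); the (5, 2) max-pool in self.loc collapses the
+            # height to 1 and pads the width -> (b, 256, 1, 21); newCnn maps
+            # channels to num_classes -> (b, 78, 1, 21). squeeze(2) drops the
+            # unit height and permute(2, 0, 1) gives the (width, batch, classes)
+            # sequence consumed by CTC-style greedy decoding (decodePlate).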
conv = x.squeeze(2) # b *512 * width + conv = conv.permute(2, 0, 1) # [w, b, c] + # output = F.log_softmax(self.rnn(conv), dim=2) + output = torch.softmax(conv, dim=2) + return output + +myCfg = [32,'M',64,'M',96,'M',128,'M',256] +class myNet(nn.Module): + def __init__(self,cfg=None,num_classes=3): + super(myNet, self).__init__() + if cfg is None: + cfg = myCfg + self.feature = self.make_layers(cfg, True) + self.classifier = nn.Linear(cfg[-1], num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + x = nn.AvgPool2d(kernel_size=3, stride=1)(x) + x = x.view(x.size(0), -1) + y = self.classifier(x) + return y + + +class MyNet_color(nn.Module): + def __init__(self, class_num=6): + super(MyNet_color, self).__init__() + self.class_num = class_num + self.backbone = nn.Sequential( + nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=(1, 1)), # 0 + torch.nn.BatchNorm2d(16), + nn.ReLU(), + nn.MaxPool2d(kernel_size=(2, 2)), + nn.Dropout(0), + nn.Flatten(), + nn.Linear(480, 64), + nn.Dropout(0), + nn.ReLU(), + nn.Linear(64, class_num), + nn.Dropout(0), + nn.Softmax(1) + ) + + def forward(self, x): + logits = self.backbone(x) + + return logits + + +class myNet_ocr_color(nn.Module): + def __init__(self,cfg=None,num_classes=78,export=False,color_num=None): + super(myNet_ocr_color, self).__init__() + if cfg is None: + cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256] + # cfg =[32,32,'M',64,64,'M',128,128,'M',256,256] + self.feature = self.make_layers(cfg, True) + self.export = export + self.color_num=color_num + self.conv_out_num=12 #颜色第一个卷积层输出通道12 + if self.color_num: + self.conv1=nn.Conv2d(cfg[-1],self.conv_out_num,kernel_size=3,stride=2) + self.bn1=nn.BatchNorm2d(self.conv_out_num) + self.relu1=nn.ReLU(inplace=True) + self.gap =nn.AdaptiveAvgPool2d(output_size=1) + self.color_classifier=nn.Conv2d(self.conv_out_num,self.color_num,kernel_size=1,stride=1) + self.color_bn = nn.BatchNorm2d(self.color_num) + self.flatten = nn.Flatten() + self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False) + self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1) + # self.newBn=nn.BatchNorm2d(num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + 
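+        # cfg mini-language: each integer is a conv stage's output channel count
+        # and 'M' inserts a kernel-3, stride-2 max-pool; the first conv is 5x5
+        # and unpadded, later ones are 3x3 with padding 1, each followed by
+        # BatchNorm + ReLU when batch_norm is True.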
return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.feature(x)
+        if self.color_num:
+            x_color = self.conv1(x)
+            x_color = self.bn1(x_color)
+            x_color = self.relu1(x_color)
+            x_color = self.color_classifier(x_color)
+            x_color = self.color_bn(x_color)
+            x_color = self.gap(x_color)
+            x_color = self.flatten(x_color)
+        x = self.loc(x)
+        x = self.newCnn(x)
+
+        if self.export:
+            conv = x.squeeze(2)  # b * num_classes * width
+            conv = conv.transpose(2, 1)  # [w, b, c]
+            if self.color_num:
+                return conv, x_color
+            return conv
+        else:
+            b, c, h, w = x.size()
+            assert h == 1, "the height of conv must be 1"
+            conv = x.squeeze(2)  # b * num_classes * width
+            conv = conv.permute(2, 0, 1)  # [w, b, c]
+            output = torch.log_softmax(conv, dim=2)
+            if self.color_num:
+                return output, x_color
+            return output
+
+
+if __name__ == '__main__':
+    x = torch.randn(1, 3, 48, 216)
+    model = myNet_ocr(num_classes=78, export=True)
+    out = model(x)
+    print(out.shape)
\ No newline at end of file
diff --git a/plate_recognition/plate_rec.py b/plate_recognition/plate_rec.py
new file mode 100644
index 0000000..7231e77
--- /dev/null
+++ b/plate_recognition/plate_rec.py
@@ -0,0 +1,119 @@
+from plate_recognition.plateNet import myNet_ocr, myNet_ocr_color
+import torch
+import torch.nn as nn
+import cv2
+import numpy as np
+import os
+import time
+import sys
+
+def cv_imread(path):  # can read images whose path contains Chinese characters
+    img = cv2.imdecode(np.fromfile(path, dtype=np.uint8), -1)
+    return img
+
+def allFilePath(rootPath, allFIleList):
+    fileList = os.listdir(rootPath)
+    for temp in fileList:
+        if os.path.isfile(os.path.join(rootPath, temp)):
+            if temp.endswith('.jpg') or temp.endswith('.png') or temp.endswith('.JPG'):
+                allFIleList.append(os.path.join(rootPath, temp))
+        else:
+            allFilePath(os.path.join(rootPath, temp), allFIleList)
+device = torch.device('cuda') if torch.cuda.is_available() else torch.device("cpu")
+color = ['黑色', '蓝色', '绿色', '白色', '黄色']
+plateName = r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品"
+mean_value, std_value = (0.588, 0.193)
+def decodePlate(preds):  # CTC-style greedy decode: drop blanks (0) and repeated indices
+    pre = 0
+    newPreds = []
+    index = []
+    for i in range(len(preds)):
+        if preds[i] != 0 and preds[i] != pre:
+            newPreds.append(preds[i])
+            index.append(i)
+        pre = preds[i]
+    return newPreds, index
+
+def image_processing(img, device):
+    img = cv2.resize(img, (168, 48))
+    img = np.reshape(img, (48, 168, 3))
+
+    # normalize
+    img = img.astype(np.float32)
+    img = (img / 255. - mean_value) / std_value
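+    # The scalar mean/std pair (0.588, 0.193) matches the recognition model's
+    # training-time normalization and is broadcast over all three channels;
+    # the transpose below converts HWC to the CHW layout PyTorch expects.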
+    img = img.transpose([2, 0, 1])
+    img = torch.from_numpy(img)
+
+    img = img.to(device)
+    img = img.view(1, *img.size())
+    return img
+
+def get_plate_result(img, device, model, is_color=False):
+    input = image_processing(img, device)
+    if is_color:  # whether to also classify the plate color
+        preds, color_preds = model(input)
+        color_preds = torch.softmax(color_preds, dim=-1)
+        color_conf, color_index = torch.max(color_preds, dim=-1)
+        color_conf = color_conf.item()
+    else:
+        preds = model(input)
+    preds = torch.softmax(preds, dim=-1)
+    prob, index = preds.max(dim=-1)
+    index = index.view(-1).detach().cpu().numpy()
+    prob = prob.view(-1).detach().cpu().numpy()
+
+    # preds=preds.view(-1).detach().cpu().numpy()
+    newPreds, new_index = decodePlate(index)
+    prob = prob[new_index]
+    plate = ""
+    for i in newPreds:
+        plate += plateName[i]
+    # if not (plate[0] in plateName[1:44] ):
+    #     return ""
+    if is_color:
+        return plate, prob, color[color_index], color_conf  # plate string, per-character probabilities, color, and color confidence
+    else:
+        return plate, prob
+
+def init_model(device, model_path, is_color=False):
+    # model_path = "plate_recognition/model/checkpoint_61_acc_0.9715.pth"
+    check_point = torch.load(model_path, map_location=device)
+    model_state = check_point['state_dict']
+    cfg = check_point['cfg']
+    color_classes = 0
+    if is_color:
+        color_classes = 5  # number of plate color classes
+    model = myNet_ocr_color(num_classes=len(plateName), export=True, cfg=cfg, color_num=color_classes)
+
+    model.load_state_dict(model_state, strict=False)
+    model.to(device)
+    model.eval()
+    return model
+
+# model = init_model(device)
+if __name__ == '__main__':
+    model_path = r"weights/plate_rec_color.pth"
+    image_path = "images/tmp2424.png"
+    testPath = r"/mnt/Gpan/Mydata/pytorchPorject/CRNN/crnn_plate_recognition/images"
+    fileList = []
+    allFilePath(testPath, fileList)
+    # result = get_plate_result(image_path, device)
+    # print(result)
+    is_color = False
+    model = init_model(device, model_path, is_color=is_color)
+    right = 0
+    begin = time.time()
+
+    for img_path in fileList:
+        img = cv2.imread(img_path)
+        if is_color:
+            plate, _, plate_color, _ = get_plate_result(img, device, model, is_color=is_color)
+            print(plate)
+        else:
+            plate, _ = get_plate_result(img, device, model, is_color=is_color)
+            print(plate, img_path)
+
+
diff --git a/readme/README.md b/readme/README.md
new file mode 100644
index 0000000..3ffc81e
--- /dev/null
+++ b/readme/README.md
@@ -0,0 +1,42 @@
+### **License plate detection training**
+
+1. **Download the dataset:** the dataset can be obtained via WeChat: we0091234.
+   It was selected and converted from the CCPD and CRPD datasets.
+   Labels use the yolo format (a worked example follows this list):
+
+   ```
+   label x y w h  pt1x pt1y pt2x pt2y pt3x pt3y pt4x pt4y
+   ```
+
+   The four keypoints are ordered (top-left, top-right, bottom-right, bottom-left).
+   All coordinates are normalized: x,y are the box center divided by the image width/height, w,h are the box width/height divided by the image width/height, and ptx,pty are the keypoint coordinates divided by the image width/height.
+
+   **For your own dataset**, annotate the four corners of each plate in labelme with "create polygons", then convert it to yolo format with json2yolo.py and you are ready to train.
+2. **Edit the train and val paths in data/widerface.yaml to point at your data**
+
+   ```
+   train: /your/train/path  # change to your training set path
+   val: /your/val/path      # change to your validation set path
+   # number of classes
+   nc: 2  # two classes are used: 0 single-layer plate, 1 double-layer plate
+
+   # class names
+   names: [ 'single','double']
+
+   ```
+3. **Train**
+
+   ```
+   python3 train.py --data data/widerface.yaml --cfg models/yolov5n-0.5.yaml --weights weights/plate_detect.pt --epoch 120
+   ```
+
+   Results are stored in the run folder.
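+A hypothetical, hand-made example of a single label line in the format from
+step 1 (class 0 = single-layer plate; all values normalized to [0,1],
+keypoints ordered top-left, top-right, bottom-right, bottom-left):
+
+```
+0 0.516 0.662 0.211 0.087 0.412 0.618 0.621 0.625 0.619 0.707 0.410 0.700
+```
+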
+### onnx export
+
+1. To export the detection model to onnx, install onnx-sim **[onnx-simplifier](https://github.com/daquexian/onnx-simplifier)**
+
+   ```
+   python export.py --weights ./weights/plate_detect.pt --img_size 640 --batch_size 1
+   onnxsim weights/plate_detect.onnx weights/plate_detect.onnx
+   ```
diff --git a/result_Rainy/20230402110037.jpg b/result_Rainy/20230402110037.jpg
new file mode 100644
index 0000000..7161caf
Binary files /dev/null and b/result_Rainy/20230402110037.jpg differ
diff --git a/result_Rainy/20230402114216.jpg b/result_Rainy/20230402114216.jpg
new file mode 100644
index 0000000..fff9ab8
Binary files /dev/null and b/result_Rainy/20230402114216.jpg differ
diff --git a/result_Rainy/20230402122209.jpg b/result_Rainy/20230402122209.jpg
new file mode 100644
index 0000000..5ebc770
Binary files /dev/null and b/result_Rainy/20230402122209.jpg differ
diff --git a/result_Rainy/20230402124902.jpg b/result_Rainy/20230402124902.jpg
new file mode 100644
index 0000000..2f89617
Binary files /dev/null and b/result_Rainy/20230402124902.jpg differ
diff --git a/result_Rainy/20230402174157.jpg b/result_Rainy/20230402174157.jpg
new file mode 100644
index 0000000..ff07bdd
Binary files /dev/null and b/result_Rainy/20230402174157.jpg differ
diff --git a/result_Rainy/20230402174728.jpg b/result_Rainy/20230402174728.jpg
new file mode 100644
index 0000000..08bc966
Binary files /dev/null and b/result_Rainy/20230402174728.jpg differ
diff --git a/result_Rainy/20230402180202.jpg b/result_Rainy/20230402180202.jpg
new file mode 100644
index 0000000..7520adb
Binary files /dev/null and b/result_Rainy/20230402180202.jpg differ
diff --git a/result_Rainy/20230402181145.jpg b/result_Rainy/20230402181145.jpg
new file mode 100644
index 0000000..1c3389f
Binary files /dev/null and b/result_Rainy/20230402181145.jpg differ
diff --git a/result_Rainy/20230402181201.jpg b/result_Rainy/20230402181201.jpg
new file mode 100644
index 0000000..e90db28
Binary files /dev/null and b/result_Rainy/20230402181201.jpg differ
diff --git a/result_Rainy/20230402181706.jpg b/result_Rainy/20230402181706.jpg
new file mode 100644
index 0000000..5d170ed
Binary files /dev/null and b/result_Rainy/20230402181706.jpg differ
diff --git a/result_Rainy/20230402181837.jpg b/result_Rainy/20230402181837.jpg
new file mode 100644
index 0000000..59a3f80
Binary files /dev/null and b/result_Rainy/20230402181837.jpg differ
diff --git a/result_Rainy/20230402181905.jpg b/result_Rainy/20230402181905.jpg
new file mode 100644
index 0000000..a9930fd
Binary files /dev/null and b/result_Rainy/20230402181905.jpg differ
diff --git a/result_Rainy/20230402182355.jpg b/result_Rainy/20230402182355.jpg
new file mode 100644
index 0000000..e59114d
Binary files /dev/null and b/result_Rainy/20230402182355.jpg differ
diff --git a/result_Rainy/20230402185127.jpg b/result_Rainy/20230402185127.jpg
new file mode 100644
index 0000000..23b782a
Binary files /dev/null and b/result_Rainy/20230402185127.jpg differ
diff --git a/result_Rainy/20230402185227.jpg b/result_Rainy/20230402185227.jpg
new file mode 100644
index 0000000..57355ad
Binary files /dev/null and b/result_Rainy/20230402185227.jpg differ
diff --git a/result_Rainy/20230402185235.jpg b/result_Rainy/20230402185235.jpg
new file mode 100644
index 0000000..8ed3c87
Binary files /dev/null and b/result_Rainy/20230402185235.jpg differ
diff --git a/result_Rainy/20230402185440.jpg b/result_Rainy/20230402185440.jpg
new file mode 100644
index 0000000..afc2d54
Binary files /dev/null and b/result_Rainy/20230402185440.jpg differ
diff --git a/result_Rainy/20230402185635.jpg
b/result_Rainy/20230402185635.jpg new file mode 100644 index 0000000..f45239f Binary files /dev/null and b/result_Rainy/20230402185635.jpg differ diff --git a/result_Rainy/20230402185824.jpg b/result_Rainy/20230402185824.jpg new file mode 100644 index 0000000..d7c3809 Binary files /dev/null and b/result_Rainy/20230402185824.jpg differ diff --git a/result_Rainy/20230402190134.jpg b/result_Rainy/20230402190134.jpg new file mode 100644 index 0000000..a8b738f Binary files /dev/null and b/result_Rainy/20230402190134.jpg differ diff --git a/result_Rainy/20230402190219.jpg b/result_Rainy/20230402190219.jpg new file mode 100644 index 0000000..b221fe4 Binary files /dev/null and b/result_Rainy/20230402190219.jpg differ diff --git a/result_Rainy/20230402191206.jpg b/result_Rainy/20230402191206.jpg new file mode 100644 index 0000000..978ed05 Binary files /dev/null and b/result_Rainy/20230402191206.jpg differ diff --git a/result_Rainy/20230402191441.jpg b/result_Rainy/20230402191441.jpg new file mode 100644 index 0000000..cbf7704 Binary files /dev/null and b/result_Rainy/20230402191441.jpg differ diff --git a/result_Rainy/20230402193706.jpg b/result_Rainy/20230402193706.jpg new file mode 100644 index 0000000..8652956 Binary files /dev/null and b/result_Rainy/20230402193706.jpg differ diff --git a/result_Rainy/20230402204602.jpg b/result_Rainy/20230402204602.jpg new file mode 100644 index 0000000..00bf35a Binary files /dev/null and b/result_Rainy/20230402204602.jpg differ diff --git a/result_Rainy/20230402205418.jpg b/result_Rainy/20230402205418.jpg new file mode 100644 index 0000000..91ef35c Binary files /dev/null and b/result_Rainy/20230402205418.jpg differ diff --git a/result_Rainy/20230402215131.jpg b/result_Rainy/20230402215131.jpg new file mode 100644 index 0000000..8c70f93 Binary files /dev/null and b/result_Rainy/20230402215131.jpg differ diff --git a/result_Rainy/20230403095717.jpg b/result_Rainy/20230403095717.jpg new file mode 100644 index 0000000..4c2d26d Binary files /dev/null and b/result_Rainy/20230403095717.jpg differ diff --git a/result_Rainy/20230403095725.jpg b/result_Rainy/20230403095725.jpg new file mode 100644 index 0000000..f0e09a6 Binary files /dev/null and b/result_Rainy/20230403095725.jpg differ diff --git a/result_Rainy/20230403095737.jpg b/result_Rainy/20230403095737.jpg new file mode 100644 index 0000000..c430431 Binary files /dev/null and b/result_Rainy/20230403095737.jpg differ diff --git a/result_Rainy/20230403095753.jpg b/result_Rainy/20230403095753.jpg new file mode 100644 index 0000000..77068fc Binary files /dev/null and b/result_Rainy/20230403095753.jpg differ diff --git a/result_Rainy/20230403095805.jpg b/result_Rainy/20230403095805.jpg new file mode 100644 index 0000000..1173a87 Binary files /dev/null and b/result_Rainy/20230403095805.jpg differ diff --git a/result_Rainy/20230403100026.jpg b/result_Rainy/20230403100026.jpg new file mode 100644 index 0000000..0d3f5d6 Binary files /dev/null and b/result_Rainy/20230403100026.jpg differ diff --git a/result_Rainy/20230403100057.jpg b/result_Rainy/20230403100057.jpg new file mode 100644 index 0000000..3689fbb Binary files /dev/null and b/result_Rainy/20230403100057.jpg differ diff --git a/result_Rainy/20230403100126.jpg b/result_Rainy/20230403100126.jpg new file mode 100644 index 0000000..41be26b Binary files /dev/null and b/result_Rainy/20230403100126.jpg differ diff --git a/result_Rainy/20230403101514.jpg b/result_Rainy/20230403101514.jpg new file mode 100644 index 0000000..c67be35 Binary files /dev/null and 
b/result_Rainy/20230403101514.jpg differ diff --git a/result_Rainy/20230403102512.jpg b/result_Rainy/20230403102512.jpg new file mode 100644 index 0000000..5ec2bb3 Binary files /dev/null and b/result_Rainy/20230403102512.jpg differ diff --git a/result_Rainy/20230403103451.jpg b/result_Rainy/20230403103451.jpg new file mode 100644 index 0000000..948e215 Binary files /dev/null and b/result_Rainy/20230403103451.jpg differ diff --git a/result_Rainy/20230403103529.jpg b/result_Rainy/20230403103529.jpg new file mode 100644 index 0000000..c250eb4 Binary files /dev/null and b/result_Rainy/20230403103529.jpg differ diff --git a/result_Rainy/20230403103608.jpg b/result_Rainy/20230403103608.jpg new file mode 100644 index 0000000..556e30e Binary files /dev/null and b/result_Rainy/20230403103608.jpg differ diff --git a/result_Rainy/20230403104140.jpg b/result_Rainy/20230403104140.jpg new file mode 100644 index 0000000..ab11b45 Binary files /dev/null and b/result_Rainy/20230403104140.jpg differ diff --git a/result_Rainy/20230403104242.jpg b/result_Rainy/20230403104242.jpg new file mode 100644 index 0000000..a62cd99 Binary files /dev/null and b/result_Rainy/20230403104242.jpg differ diff --git a/result_Rainy/20230403104456.jpg b/result_Rainy/20230403104456.jpg new file mode 100644 index 0000000..9564577 Binary files /dev/null and b/result_Rainy/20230403104456.jpg differ diff --git a/result_Rainy/20230403104545.jpg b/result_Rainy/20230403104545.jpg new file mode 100644 index 0000000..2a3ba12 Binary files /dev/null and b/result_Rainy/20230403104545.jpg differ diff --git a/result_Rainy/20230403110417.jpg b/result_Rainy/20230403110417.jpg new file mode 100644 index 0000000..685645e Binary files /dev/null and b/result_Rainy/20230403110417.jpg differ diff --git a/result_Rainy/20230403110828.jpg b/result_Rainy/20230403110828.jpg new file mode 100644 index 0000000..c7f04e9 Binary files /dev/null and b/result_Rainy/20230403110828.jpg differ diff --git a/result_Rainy/20230403110834.jpg b/result_Rainy/20230403110834.jpg new file mode 100644 index 0000000..1364c8c Binary files /dev/null and b/result_Rainy/20230403110834.jpg differ diff --git a/result_Rainy/20230403112145.jpg b/result_Rainy/20230403112145.jpg new file mode 100644 index 0000000..965a7b5 Binary files /dev/null and b/result_Rainy/20230403112145.jpg differ diff --git a/result_Rainy/20230403112214.jpg b/result_Rainy/20230403112214.jpg new file mode 100644 index 0000000..47cb063 Binary files /dev/null and b/result_Rainy/20230403112214.jpg differ diff --git a/result_Rainy/20230403112311.jpg b/result_Rainy/20230403112311.jpg new file mode 100644 index 0000000..d891b45 Binary files /dev/null and b/result_Rainy/20230403112311.jpg differ diff --git a/result_Rainy/20230403112419.jpg b/result_Rainy/20230403112419.jpg new file mode 100644 index 0000000..7a0c06b Binary files /dev/null and b/result_Rainy/20230403112419.jpg differ diff --git a/result_Rainy/20230403113813.jpg b/result_Rainy/20230403113813.jpg new file mode 100644 index 0000000..658112b Binary files /dev/null and b/result_Rainy/20230403113813.jpg differ diff --git a/result_Rainy/20230403123413.jpg b/result_Rainy/20230403123413.jpg new file mode 100644 index 0000000..24204c4 Binary files /dev/null and b/result_Rainy/20230403123413.jpg differ diff --git a/result_Rainy/20230403124017.jpg b/result_Rainy/20230403124017.jpg new file mode 100644 index 0000000..3734fe7 Binary files /dev/null and b/result_Rainy/20230403124017.jpg differ diff --git a/result_Rainy/20230403124103.jpg 
b/result_Rainy/20230403124103.jpg new file mode 100644 index 0000000..cd95b52 Binary files /dev/null and b/result_Rainy/20230403124103.jpg differ diff --git a/result_Rainy/20230403124124.jpg b/result_Rainy/20230403124124.jpg new file mode 100644 index 0000000..ce6bb99 Binary files /dev/null and b/result_Rainy/20230403124124.jpg differ diff --git a/result_Rainy/20230403124136.jpg b/result_Rainy/20230403124136.jpg new file mode 100644 index 0000000..3c6a652 Binary files /dev/null and b/result_Rainy/20230403124136.jpg differ diff --git a/result_Rainy/20230403124217.jpg b/result_Rainy/20230403124217.jpg new file mode 100644 index 0000000..d808b60 Binary files /dev/null and b/result_Rainy/20230403124217.jpg differ diff --git a/result_Rainy/20230403124252.jpg b/result_Rainy/20230403124252.jpg new file mode 100644 index 0000000..d4b06f2 Binary files /dev/null and b/result_Rainy/20230403124252.jpg differ diff --git a/result_Rainy/20230403124302.jpg b/result_Rainy/20230403124302.jpg new file mode 100644 index 0000000..2a13d5f Binary files /dev/null and b/result_Rainy/20230403124302.jpg differ diff --git a/result_Rainy/20230403124351.jpg b/result_Rainy/20230403124351.jpg new file mode 100644 index 0000000..a157612 Binary files /dev/null and b/result_Rainy/20230403124351.jpg differ diff --git a/result_Rainy/20230403124405.jpg b/result_Rainy/20230403124405.jpg new file mode 100644 index 0000000..ad7eb93 Binary files /dev/null and b/result_Rainy/20230403124405.jpg differ diff --git a/result_Rainy/20230403124451.jpg b/result_Rainy/20230403124451.jpg new file mode 100644 index 0000000..05a7db7 Binary files /dev/null and b/result_Rainy/20230403124451.jpg differ diff --git a/result_Rainy/20230403124634.jpg b/result_Rainy/20230403124634.jpg new file mode 100644 index 0000000..2ff9d81 Binary files /dev/null and b/result_Rainy/20230403124634.jpg differ diff --git a/result_Rainy/20230403124804.jpg b/result_Rainy/20230403124804.jpg new file mode 100644 index 0000000..9bf0cd0 Binary files /dev/null and b/result_Rainy/20230403124804.jpg differ diff --git a/result_Rainy/20230403130503.jpg b/result_Rainy/20230403130503.jpg new file mode 100644 index 0000000..9ad3c39 Binary files /dev/null and b/result_Rainy/20230403130503.jpg differ diff --git a/result_Rainy/20230403133007.jpg b/result_Rainy/20230403133007.jpg new file mode 100644 index 0000000..0882c27 Binary files /dev/null and b/result_Rainy/20230403133007.jpg differ diff --git a/result_Rainy/20230403133306.jpg b/result_Rainy/20230403133306.jpg new file mode 100644 index 0000000..2f858b8 Binary files /dev/null and b/result_Rainy/20230403133306.jpg differ diff --git a/result_Rainy/20230403133325.jpg b/result_Rainy/20230403133325.jpg new file mode 100644 index 0000000..7c0deec Binary files /dev/null and b/result_Rainy/20230403133325.jpg differ diff --git a/result_Rainy/20230403133347.jpg b/result_Rainy/20230403133347.jpg new file mode 100644 index 0000000..5dae7fd Binary files /dev/null and b/result_Rainy/20230403133347.jpg differ diff --git a/result_Rainy/20230403133423.jpg b/result_Rainy/20230403133423.jpg new file mode 100644 index 0000000..ecf9666 Binary files /dev/null and b/result_Rainy/20230403133423.jpg differ diff --git a/result_Rainy/20230403134123.jpg b/result_Rainy/20230403134123.jpg new file mode 100644 index 0000000..d7933fd Binary files /dev/null and b/result_Rainy/20230403134123.jpg differ diff --git a/result_Rainy/20230403134542.jpg b/result_Rainy/20230403134542.jpg new file mode 100644 index 0000000..2874a1b Binary files /dev/null and 
b/result_Rainy/20230403134542.jpg differ diff --git a/result_Rainy/20230403134606.jpg b/result_Rainy/20230403134606.jpg new file mode 100644 index 0000000..debbc94 Binary files /dev/null and b/result_Rainy/20230403134606.jpg differ diff --git a/result_Rainy/20230403134619.jpg b/result_Rainy/20230403134619.jpg new file mode 100644 index 0000000..b0873f1 Binary files /dev/null and b/result_Rainy/20230403134619.jpg differ diff --git a/result_Rainy/20230403134804.jpg b/result_Rainy/20230403134804.jpg new file mode 100644 index 0000000..e07e503 Binary files /dev/null and b/result_Rainy/20230403134804.jpg differ diff --git a/result_Rainy/20230403134826.jpg b/result_Rainy/20230403134826.jpg new file mode 100644 index 0000000..1abab9b Binary files /dev/null and b/result_Rainy/20230403134826.jpg differ diff --git a/result_Rainy/20230403134831.jpg b/result_Rainy/20230403134831.jpg new file mode 100644 index 0000000..8929f86 Binary files /dev/null and b/result_Rainy/20230403134831.jpg differ diff --git a/result_Rainy/20230403134851.jpg b/result_Rainy/20230403134851.jpg new file mode 100644 index 0000000..a9762b4 Binary files /dev/null and b/result_Rainy/20230403134851.jpg differ diff --git a/result_Rainy/20230403135436.jpg b/result_Rainy/20230403135436.jpg new file mode 100644 index 0000000..2d91559 Binary files /dev/null and b/result_Rainy/20230403135436.jpg differ diff --git a/result_Rainy/20230403135701.jpg b/result_Rainy/20230403135701.jpg new file mode 100644 index 0000000..a0b5d29 Binary files /dev/null and b/result_Rainy/20230403135701.jpg differ diff --git a/result_Rainy/20230403140316.jpg b/result_Rainy/20230403140316.jpg new file mode 100644 index 0000000..7df8745 Binary files /dev/null and b/result_Rainy/20230403140316.jpg differ diff --git a/result_Rainy/20230403140510.jpg b/result_Rainy/20230403140510.jpg new file mode 100644 index 0000000..8b9cb83 Binary files /dev/null and b/result_Rainy/20230403140510.jpg differ diff --git a/result_Rainy/20230403140545.jpg b/result_Rainy/20230403140545.jpg new file mode 100644 index 0000000..20cd094 Binary files /dev/null and b/result_Rainy/20230403140545.jpg differ diff --git a/result_Rainy/20230403140607.jpg b/result_Rainy/20230403140607.jpg new file mode 100644 index 0000000..c93811a Binary files /dev/null and b/result_Rainy/20230403140607.jpg differ diff --git a/result_Rainy/20230403140809.jpg b/result_Rainy/20230403140809.jpg new file mode 100644 index 0000000..1103a9d Binary files /dev/null and b/result_Rainy/20230403140809.jpg differ diff --git a/result_Rainy/20230403140837.jpg b/result_Rainy/20230403140837.jpg new file mode 100644 index 0000000..a65b28b Binary files /dev/null and b/result_Rainy/20230403140837.jpg differ diff --git a/result_Rainy/20230403140923.jpg b/result_Rainy/20230403140923.jpg new file mode 100644 index 0000000..da3fbb7 Binary files /dev/null and b/result_Rainy/20230403140923.jpg differ diff --git a/result_Rainy/20230403141404.jpg b/result_Rainy/20230403141404.jpg new file mode 100644 index 0000000..3aea611 Binary files /dev/null and b/result_Rainy/20230403141404.jpg differ diff --git a/result_Rainy/20230403142918.jpg b/result_Rainy/20230403142918.jpg new file mode 100644 index 0000000..48b38b7 Binary files /dev/null and b/result_Rainy/20230403142918.jpg differ diff --git a/result_Rainy/20230403142924.jpg b/result_Rainy/20230403142924.jpg new file mode 100644 index 0000000..760d9d5 Binary files /dev/null and b/result_Rainy/20230403142924.jpg differ diff --git a/result_Rainy/20230403143111.jpg 
b/result_Rainy/20230403143111.jpg new file mode 100644 index 0000000..674e3fb Binary files /dev/null and b/result_Rainy/20230403143111.jpg differ diff --git a/result_Rainy/20230403143121.jpg b/result_Rainy/20230403143121.jpg new file mode 100644 index 0000000..eced3b7 Binary files /dev/null and b/result_Rainy/20230403143121.jpg differ diff --git a/result_Rainy/20230403143706.jpg b/result_Rainy/20230403143706.jpg new file mode 100644 index 0000000..c31ad2e Binary files /dev/null and b/result_Rainy/20230403143706.jpg differ diff --git a/result_Rainy/20230403144318.jpg b/result_Rainy/20230403144318.jpg new file mode 100644 index 0000000..e7eabf4 Binary files /dev/null and b/result_Rainy/20230403144318.jpg differ diff --git a/result_Rainy/20230403144922.jpg b/result_Rainy/20230403144922.jpg new file mode 100644 index 0000000..b34ca5e Binary files /dev/null and b/result_Rainy/20230403144922.jpg differ diff --git a/result_Rainy/20230403145623.jpg b/result_Rainy/20230403145623.jpg new file mode 100644 index 0000000..220730b Binary files /dev/null and b/result_Rainy/20230403145623.jpg differ diff --git a/result_Rainy/20230403145646.jpg b/result_Rainy/20230403145646.jpg new file mode 100644 index 0000000..c17f84d Binary files /dev/null and b/result_Rainy/20230403145646.jpg differ diff --git a/result_Rainy/20230403145705.jpg b/result_Rainy/20230403145705.jpg new file mode 100644 index 0000000..844d4aa Binary files /dev/null and b/result_Rainy/20230403145705.jpg differ diff --git a/result_Rainy/20230403145728.jpg b/result_Rainy/20230403145728.jpg new file mode 100644 index 0000000..4f4d101 Binary files /dev/null and b/result_Rainy/20230403145728.jpg differ diff --git a/result_Rainy/20230403150326.jpg b/result_Rainy/20230403150326.jpg new file mode 100644 index 0000000..03494ab Binary files /dev/null and b/result_Rainy/20230403150326.jpg differ diff --git a/result_Rainy/20230403150331.jpg b/result_Rainy/20230403150331.jpg new file mode 100644 index 0000000..52eaca4 Binary files /dev/null and b/result_Rainy/20230403150331.jpg differ diff --git a/result_Rainy/20230403150352.jpg b/result_Rainy/20230403150352.jpg new file mode 100644 index 0000000..7884a6e Binary files /dev/null and b/result_Rainy/20230403150352.jpg differ diff --git a/result_Rainy/20230403150432.jpg b/result_Rainy/20230403150432.jpg new file mode 100644 index 0000000..f387a25 Binary files /dev/null and b/result_Rainy/20230403150432.jpg differ diff --git a/result_Rainy/20230403152406.jpg b/result_Rainy/20230403152406.jpg new file mode 100644 index 0000000..63c2ff4 Binary files /dev/null and b/result_Rainy/20230403152406.jpg differ diff --git a/result_Rainy/20230403152827.jpg b/result_Rainy/20230403152827.jpg new file mode 100644 index 0000000..f10d094 Binary files /dev/null and b/result_Rainy/20230403152827.jpg differ diff --git a/result_Rainy/20230403153033.jpg b/result_Rainy/20230403153033.jpg new file mode 100644 index 0000000..69b94c4 Binary files /dev/null and b/result_Rainy/20230403153033.jpg differ diff --git a/result_Rainy/20230403155233.jpg b/result_Rainy/20230403155233.jpg new file mode 100644 index 0000000..90490bf Binary files /dev/null and b/result_Rainy/20230403155233.jpg differ diff --git a/result_Rainy/20230403160720.jpg b/result_Rainy/20230403160720.jpg new file mode 100644 index 0000000..e93a780 Binary files /dev/null and b/result_Rainy/20230403160720.jpg differ diff --git a/result_Rainy/20230403161621.jpg b/result_Rainy/20230403161621.jpg new file mode 100644 index 0000000..00c31a9 Binary files /dev/null and 
b/result_Rainy/20230403161621.jpg differ diff --git a/result_Rainy/20230403161947.jpg b/result_Rainy/20230403161947.jpg new file mode 100644 index 0000000..1471848 Binary files /dev/null and b/result_Rainy/20230403161947.jpg differ diff --git a/result_Rainy/20230403162014.jpg b/result_Rainy/20230403162014.jpg new file mode 100644 index 0000000..9e0379d Binary files /dev/null and b/result_Rainy/20230403162014.jpg differ diff --git a/result_Rainy/20230403162753.jpg b/result_Rainy/20230403162753.jpg new file mode 100644 index 0000000..6dbd002 Binary files /dev/null and b/result_Rainy/20230403162753.jpg differ diff --git a/result_Rainy/20230403163831.jpg b/result_Rainy/20230403163831.jpg new file mode 100644 index 0000000..d197708 Binary files /dev/null and b/result_Rainy/20230403163831.jpg differ diff --git a/result_Rainy/20230403164326.jpg b/result_Rainy/20230403164326.jpg new file mode 100644 index 0000000..ad11b33 Binary files /dev/null and b/result_Rainy/20230403164326.jpg differ diff --git a/result_Rainy/20230403165419.jpg b/result_Rainy/20230403165419.jpg new file mode 100644 index 0000000..d5227a0 Binary files /dev/null and b/result_Rainy/20230403165419.jpg differ diff --git a/result_Rainy/20230403174904.jpg b/result_Rainy/20230403174904.jpg new file mode 100644 index 0000000..088ad20 Binary files /dev/null and b/result_Rainy/20230403174904.jpg differ diff --git a/result_Rainy/20230403200008.jpg b/result_Rainy/20230403200008.jpg new file mode 100644 index 0000000..9c7d937 Binary files /dev/null and b/result_Rainy/20230403200008.jpg differ diff --git a/result_Rainy/20230403204052.jpg b/result_Rainy/20230403204052.jpg new file mode 100644 index 0000000..ddac659 Binary files /dev/null and b/result_Rainy/20230403204052.jpg differ diff --git a/result_Rainy/20230403205331.jpg b/result_Rainy/20230403205331.jpg new file mode 100644 index 0000000..676e22a Binary files /dev/null and b/result_Rainy/20230403205331.jpg differ diff --git a/result_Rainy/20230403205353.jpg b/result_Rainy/20230403205353.jpg new file mode 100644 index 0000000..7a38034 Binary files /dev/null and b/result_Rainy/20230403205353.jpg differ diff --git a/result_Rainy/20230403205627.jpg b/result_Rainy/20230403205627.jpg new file mode 100644 index 0000000..dd5670f Binary files /dev/null and b/result_Rainy/20230403205627.jpg differ diff --git a/result_Rainy/20230403205637.jpg b/result_Rainy/20230403205637.jpg new file mode 100644 index 0000000..6684b99 Binary files /dev/null and b/result_Rainy/20230403205637.jpg differ diff --git a/result_Rainy/20230403205646.jpg b/result_Rainy/20230403205646.jpg new file mode 100644 index 0000000..626a375 Binary files /dev/null and b/result_Rainy/20230403205646.jpg differ diff --git a/result_Rainy/20230403205717.jpg b/result_Rainy/20230403205717.jpg new file mode 100644 index 0000000..59cbdeb Binary files /dev/null and b/result_Rainy/20230403205717.jpg differ diff --git a/result_Rainy/20230403205726.jpg b/result_Rainy/20230403205726.jpg new file mode 100644 index 0000000..c97e2ea Binary files /dev/null and b/result_Rainy/20230403205726.jpg differ diff --git a/result_Rainy/20230403205909.jpg b/result_Rainy/20230403205909.jpg new file mode 100644 index 0000000..c20435c Binary files /dev/null and b/result_Rainy/20230403205909.jpg differ diff --git a/result_Rainy/20230403205918.jpg b/result_Rainy/20230403205918.jpg new file mode 100644 index 0000000..cfd6be3 Binary files /dev/null and b/result_Rainy/20230403205918.jpg differ diff --git a/result_Rainy/20230403205939.jpg 
b/result_Rainy/20230403205939.jpg new file mode 100644 index 0000000..dcaabe7 Binary files /dev/null and b/result_Rainy/20230403205939.jpg differ diff --git a/result_Rainy/20230403205953.jpg b/result_Rainy/20230403205953.jpg new file mode 100644 index 0000000..aa02073 Binary files /dev/null and b/result_Rainy/20230403205953.jpg differ diff --git a/result_Rainy/20230403211808.jpg b/result_Rainy/20230403211808.jpg new file mode 100644 index 0000000..16d85de Binary files /dev/null and b/result_Rainy/20230403211808.jpg differ diff --git a/result_Rainy/20230404001927.jpg b/result_Rainy/20230404001927.jpg new file mode 100644 index 0000000..8d39451 Binary files /dev/null and b/result_Rainy/20230404001927.jpg differ diff --git a/result_Rainy/20230404003523.jpg b/result_Rainy/20230404003523.jpg new file mode 100644 index 0000000..a4c3fb2 Binary files /dev/null and b/result_Rainy/20230404003523.jpg differ diff --git a/result_Rainy/20230404003828.jpg b/result_Rainy/20230404003828.jpg new file mode 100644 index 0000000..5669f7e Binary files /dev/null and b/result_Rainy/20230404003828.jpg differ diff --git a/result_Rainy/20230404004015.jpg b/result_Rainy/20230404004015.jpg new file mode 100644 index 0000000..7ccf4a4 Binary files /dev/null and b/result_Rainy/20230404004015.jpg differ diff --git a/result_Rainy/20230404004137.jpg b/result_Rainy/20230404004137.jpg new file mode 100644 index 0000000..398e487 Binary files /dev/null and b/result_Rainy/20230404004137.jpg differ diff --git a/result_Rainy/20230404033237.jpg b/result_Rainy/20230404033237.jpg new file mode 100644 index 0000000..60b6c45 Binary files /dev/null and b/result_Rainy/20230404033237.jpg differ diff --git a/result_Rainy/20230404033324.jpg b/result_Rainy/20230404033324.jpg new file mode 100644 index 0000000..6c44d55 Binary files /dev/null and b/result_Rainy/20230404033324.jpg differ diff --git a/result_Rainy/20230404052913.jpg b/result_Rainy/20230404052913.jpg new file mode 100644 index 0000000..9a7898f Binary files /dev/null and b/result_Rainy/20230404052913.jpg differ diff --git a/result_Rainy/20230404053155.jpg b/result_Rainy/20230404053155.jpg new file mode 100644 index 0000000..b4f0d36 Binary files /dev/null and b/result_Rainy/20230404053155.jpg differ diff --git a/result_Rainy/20230404062844.jpg b/result_Rainy/20230404062844.jpg new file mode 100644 index 0000000..be4203e Binary files /dev/null and b/result_Rainy/20230404062844.jpg differ diff --git a/result_Rainy/20230404062851.jpg b/result_Rainy/20230404062851.jpg new file mode 100644 index 0000000..111ff8d Binary files /dev/null and b/result_Rainy/20230404062851.jpg differ diff --git a/result_Rainy/20230404063557.jpg b/result_Rainy/20230404063557.jpg new file mode 100644 index 0000000..7d878b4 Binary files /dev/null and b/result_Rainy/20230404063557.jpg differ diff --git a/result_Rainy/错误.jpg b/result_Rainy/错误.jpg new file mode 100644 index 0000000..15283db Binary files /dev/null and b/result_Rainy/错误.jpg differ diff --git a/result_Rainy/错误2.jpg b/result_Rainy/错误2.jpg new file mode 100644 index 0000000..616707c Binary files /dev/null and b/result_Rainy/错误2.jpg differ diff --git a/result_Rainy/错误3.jpg b/result_Rainy/错误3.jpg new file mode 100644 index 0000000..558f211 Binary files /dev/null and b/result_Rainy/错误3.jpg differ diff --git a/result_Rainy/错误4.jpg b/result_Rainy/错误4.jpg new file mode 100644 index 0000000..0885078 Binary files /dev/null and b/result_Rainy/错误4.jpg differ diff --git a/test.py b/test.py new file mode 100644 index 0000000..dbee407 --- /dev/null +++ b/test.py @@ 
-0,0 +1,337 @@ +import argparse +import json +import os +from pathlib import Path +from threading import Thread + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from models.experimental import attempt_load +# from utils.datasets import create_dataloader +from utils.face_datasets import create_dataloader +from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \ + non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, non_max_suppression_face +from utils.loss import compute_loss +from utils.metrics import ap_per_class, ConfusionMatrix +from utils.plots import plot_images, output_to_target, plot_study_txt +from utils.torch_utils import select_device, time_synchronized + + +def test(data, + weights=None, + batch_size=32, + imgsz=640, + conf_thres=0.001, + iou_thres=0.6, # for NMS + save_json=False, + single_cls=False, + augment=False, + verbose=False, + model=None, + dataloader=None, + save_dir=Path(''), # for saving images + save_txt=False, # for auto-labelling + save_hybrid=False, # for hybrid auto-labelling + save_conf=False, # save auto-label confidences + plots=True, + log_imgs=0): # number of logged images + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device = next(model.parameters()).device # get model device + + else: # called directly + set_logging() + device = select_device(opt.device, batch_size=batch_size) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size + + # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 + # if device.type != 'cpu' and torch.cuda.device_count() > 1: + # model = nn.DataParallel(model) + + # Half + half = device.type != 'cpu' # half precision only supported on CUDA + if half: + model.half() + + # Configure + model.eval() + is_coco = data.endswith('coco.yaml') # is COCO dataset + with open(data) as f: + data = yaml.load(f, Loader=yaml.FullLoader) # model dict + check_dataset(data) # check + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Logging + log_imgs, wandb = min(log_imgs, 100), None # ceil + try: + import wandb # Weights & Biases + except ImportError: + log_imgs = 0 + + # Dataloader + if not training: + img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img + _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once + path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images + dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} + coco91class = coco80_to_coco91_class() + s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
+ loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] + for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + img = img.to(device, non_blocking=True) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + targets = targets.to(device) + nb, _, height, width = img.shape # batch size, channels, height, width + + with torch.no_grad(): + # Run model + t = time_synchronized() + inf_out, train_out = model(img, augment=augment) # inference and training outputs + t0 += time_synchronized() - t + + # Compute loss + if training: + loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls + + # Run NMS + targets[:, 2:6] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t = time_synchronized() + #output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) + output = non_max_suppression_face(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) + t1 += time_synchronized() - t + + # Statistics per image + for si, pred in enumerate(output): + pred = torch.cat((pred[:, :5], pred[:, 13:]), 1) # throw landmark in thresh + labels = targets[targets[:, 0] == si, 1:] + nl = len(labels) + tcls = labels[:, 0].tolist() if nl else [] # target class + path = Path(paths[si]) + seen += 1 + + if len(pred) == 0: + if nl: + stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) + continue + + # Predictions + predn = pred.clone() + scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred + + # Append to text file + if save_txt: + gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + # W&B logging + if plots and len(wandb_images) < log_imgs: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) + + # Append to pycocotools JSON dictionary + if save_json: + # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(pred.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': coco91class[int(p[15])] if is_coco else int(p[15]), + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + # Assign all predictions as incorrect + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) + if nl: + detected = [] # target indices + tcls_tensor = labels[:, 0] + + # target boxes + tbox = xywh2xyxy(labels[:, 1:5]) + scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels + if plots: + confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) + + # Per target class + for cls in torch.unique(tcls_tensor): + ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices + pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices + + # Search for detections + if pi.shape[0]: + # Prediction to target ious + ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices + + # Append detections + detected_set = set() + for j in (ious > iouv[0]).nonzero(as_tuple=False): + d = ti[i[j]] # detected target + if d.item() not in detected_set: + detected_set.add(d.item()) + detected.append(d) + correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn + if len(detected) == nl: # all targets already located in image + break + + # Append statistics (correct, conf, pcls, tcls) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) + + # Plot images + if plots and batch_i < 3: + f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels + Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() + f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions + Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() + + # Compute statistics + stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95] + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class + else: + nt = torch.zeros(1) + + # Print results + pf = '%20s' + '%12.3g' * 6 # print format + print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + + # Print results per class + if verbose and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple + if not training: + print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + if wandb and wandb.run: + wandb.log({"Images": wandb_images}) + wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]}) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = '../coco/annotations/instances_val2017.json' # annotations json + pred_json = 
str(save_dir / f"{w}_predictions.json") # predictions json + print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + print(f'pycocotools unable to run: {e}') + + # Return results + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {save_dir}{s}") + model.float() # for training + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(prog='test.py') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') + parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') + parser.add_argument('--task', default='val', help="'val', 'test', 'study'") + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') + parser.add_argument('--project', default='runs/test', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + opt = parser.parse_args() + opt.save_json |= opt.data.endswith('coco.yaml') + opt.data = check_file(opt.data) # check file + print(opt) + + if opt.task in ['val', 'test']: # run normally + test(opt.data, + opt.weights, + opt.batch_size, + opt.img_size, + opt.conf_thres, + opt.iou_thres, + opt.save_json, + opt.single_cls, + opt.augment, + opt.verbose, + save_txt=opt.save_txt | opt.save_hybrid, + save_hybrid=opt.save_hybrid, + save_conf=opt.save_conf, + ) + + elif opt.task == 'study': # run over a range of settings and save/plot + for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: + f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to + x = list(range(320, 800, 64)) # x axis + y = [] # y axis + for i in x: # img-size + print('\nRunning %s point %s...' 
% (f, i)) + r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, + plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_study_txt(f, x) # plot diff --git a/test_widerface.py b/test_widerface.py new file mode 100644 index 0000000..31bb113 --- /dev/null +++ b/test_widerface.py @@ -0,0 +1,170 @@ +import argparse +import glob +import time +from pathlib import Path + +import os +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random +import numpy as np +from models.experimental import attempt_load +from utils.datasets import letterbox +from utils.general import check_img_size, check_requirements, non_max_suppression_face, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized +from tqdm import tqdm + +def dynamic_resize(shape, stride=64): + max_size = max(shape[0], shape[1]) + if max_size % stride != 0: + max_size = (int(max_size / stride) + 1) * stride + return max_size + +def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2, 4, 6, 8]] -= pad[0] # x padding + coords[:, [1, 3, 5, 7, 9]] -= pad[1] # y padding + coords[:, :10] /= gain + #clip_coords(coords, img0_shape) + coords[:, 0].clamp_(0, img0_shape[1]) # x1 + coords[:, 1].clamp_(0, img0_shape[0]) # y1 + coords[:, 2].clamp_(0, img0_shape[1]) # x2 + coords[:, 3].clamp_(0, img0_shape[0]) # y2 + coords[:, 4].clamp_(0, img0_shape[1]) # x3 + coords[:, 5].clamp_(0, img0_shape[0]) # y3 + coords[:, 6].clamp_(0, img0_shape[1]) # x4 + coords[:, 7].clamp_(0, img0_shape[0]) # y4 + coords[:, 8].clamp_(0, img0_shape[1]) # x5 + coords[:, 9].clamp_(0, img0_shape[0]) # y5 + return coords + +def show_results(img, xywh, conf, landmarks, class_num): + h,w,c = img.shape + tl = 1 or round(0.002 * (h + w) / 2) + 1 # line/font thickness + x1 = int(xywh[0] * w - 0.5 * xywh[2] * w) + y1 = int(xywh[1] * h - 0.5 * xywh[3] * h) + x2 = int(xywh[0] * w + 0.5 * xywh[2] * w) + y2 = int(xywh[1] * h + 0.5 * xywh[3] * h) + cv2.rectangle(img, (x1,y1), (x2, y2), (0,255,0), thickness=tl, lineType=cv2.LINE_AA) + + clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] + + for i in range(5): + point_x = int(landmarks[2 * i] * w) + point_y = int(landmarks[2 * i + 1] * h) + cv2.circle(img, (point_x, point_y), tl+1, clors[i], -1) + + tf = max(tl - 1, 1) # font thickness + label = str(int(class_num)) + ': ' + str(conf)[:5] + cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + return img + +def detect(model, img0): + stride = int(model.stride.max()) # model stride + imgsz = opt.img_size + if imgsz <= 0: # original size + imgsz = dynamic_resize(img0.shape) + imgsz = check_img_size(imgsz, s=64) # check img_size + img = letterbox(img0, imgsz)[0] + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + img = torch.from_numpy(img).to(device) + img = 
img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + pred = model(img, augment=opt.augment)[0] + # Apply NMS + pred = non_max_suppression_face(pred, opt.conf_thres, opt.iou_thres)[0] + gn = torch.tensor(img0.shape)[[1, 0, 1, 0]].to(device) # normalization gain whwh + gn_lks = torch.tensor(img0.shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]].to(device) # normalization gain landmarks + boxes = [] + h, w, c = img0.shape + if pred is not None: + pred[:, :4] = scale_coords(img.shape[2:], pred[:, :4], img0.shape).round() + pred[:, 5:15] = scale_coords_landmarks(img.shape[2:], pred[:, 5:15], img0.shape).round() + for j in range(pred.size()[0]): + xywh = (xyxy2xywh(pred[j, :4].view(1, 4)) / gn).view(-1) + xywh = xywh.data.cpu().numpy() + conf = pred[j, 4].cpu().numpy() + landmarks = (pred[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist() + class_num = pred[j, 15].cpu().numpy() + x1 = int(xywh[0] * w - 0.5 * xywh[2] * w) + y1 = int(xywh[1] * h - 0.5 * xywh[3] * h) + x2 = int(xywh[0] * w + 0.5 * xywh[2] * w) + y2 = int(xywh[1] * h + 0.5 * xywh[3] * h) + boxes.append([x1, y1, x2-x1, y2-y1, conf]) + return boxes + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp5/weights/last.pt', help='model.pt path(s)') + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.02, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') + parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results') + parser.add_argument('--dataset_folder', default='../WiderFace/val/images/', type=str, help='dataset path') + parser.add_argument('--folder_pict', default='/yolov5-face/data/widerface/val/wider_val.txt', type=str, help='folder_pict') + opt = parser.parse_args() + print(opt) + + # changhy : read folder_pict + pict_folder = {} + with open(opt.folder_pict, 'r') as f: + lines = f.readlines() + for line in lines: + line = line.strip().split('/') + pict_folder[line[-1]] = line[-2] + + # Load model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model + with torch.no_grad(): + # testing dataset + testset_folder = opt.dataset_folder + + for image_path in tqdm(glob.glob(os.path.join(testset_folder, '*'))): + if image_path.endswith('.txt'): + continue + img0 = cv2.imread(image_path) # BGR + if img0 is None: + print(f'ignore : {image_path}') + continue + boxes = detect(model, img0) + # 
--------------------------------------------------------------------
+            image_name = os.path.basename(image_path)
+            txt_name = os.path.splitext(image_name)[0] + ".txt"
+            save_name = os.path.join(opt.save_folder, pict_folder[image_name], txt_name)
+            dirname = os.path.dirname(save_name)
+            if not os.path.isdir(dirname):
+                os.makedirs(dirname)
+            with open(save_name, "w") as fd:
+                file_name = os.path.basename(save_name)[:-4] + "\n"
+                bboxs_num = str(len(boxes)) + "\n"
+                fd.write(file_name)
+                fd.write(bboxs_num)
+                for box in boxes:
+                    fd.write('%d %d %d %d %.03f' % (box[0], box[1], box[2], box[3], box[4] if box[4] <= 1 else 1) + '\n')
+        print('done.')
diff --git a/torch2trt/imgs/yolov5l-face.jpg b/torch2trt/imgs/yolov5l-face.jpg new file mode 100644 index 0000000..0b0049f Binary files /dev/null and b/torch2trt/imgs/yolov5l-face.jpg differ
diff --git a/torch2trt/imgs/yolov5m-face.jpg b/torch2trt/imgs/yolov5m-face.jpg new file mode 100644 index 0000000..20f4be6 Binary files /dev/null and b/torch2trt/imgs/yolov5m-face.jpg differ
diff --git a/torch2trt/imgs/yolov5n-0.5.jpg b/torch2trt/imgs/yolov5n-0.5.jpg new file mode 100644 index 0000000..b3176ce Binary files /dev/null and b/torch2trt/imgs/yolov5n-0.5.jpg differ
diff --git a/torch2trt/imgs/yolov5n-face.jpg b/torch2trt/imgs/yolov5n-face.jpg new file mode 100644 index 0000000..ab71fb8 Binary files /dev/null and b/torch2trt/imgs/yolov5n-face.jpg differ
diff --git a/torch2trt/imgs/yolov5s-face.jpg b/torch2trt/imgs/yolov5s-face.jpg new file mode 100644 index 0000000..395c7fd Binary files /dev/null and b/torch2trt/imgs/yolov5s-face.jpg differ
diff --git a/torch2trt/main.py b/torch2trt/main.py
new file mode 100644
index 0000000..95b1537
--- /dev/null
+++ b/torch2trt/main.py
@@ -0,0 +1,98 @@
+import os
+import sys
+import cv2
+import copy
+import torch
+import argparse
+root_path=os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # project root: the parent of this file's directory
+sys.path.append(root_path)  # add the project root to sys.path
+from utils.general import check_img_size,non_max_suppression_face,scale_coords,xyxy2xywh
+from utils.datasets import letterbox
+from detect_plate import scale_coords_landmarks,show_results
+from torch2trt.trt_model import TrtModel
+cur_path=os.path.abspath(os.path.dirname(__file__))
+def img_process(img_path,long_side=640,stride_max=32):
+    '''
+    Image preprocessing
+    '''
+    orgimg=cv2.imread(img_path)
+    img0 = copy.deepcopy(orgimg)
+    h0, w0 = orgimg.shape[:2]  # orig hw
+    r = long_side/ max(h0, w0)  # resize image to img_size
+    if r != 1:  # always resize down, only resize up if training with augmentation
+        interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
+        img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)
+
+    imgsz = check_img_size(long_side, s=stride_max)  # check img_size
+
+    img = letterbox(img0, new_shape=imgsz,auto=False)[0]  # auto=True: minimal rectangle; auto=False: fixed size
+    # Convert
+    img = img[:, :, ::-1].transpose(2, 0, 1).copy()  # BGR to RGB, to 3xHxW
+    img = torch.from_numpy(img)
+    img = img.float()  # uint8 to fp16/32
+    img /= 255.0  # 0 - 255 to 0.0 - 1.0
+    if img.ndimension() == 3:
+        img = img.unsqueeze(0)
+    return img,orgimg

+def img_vis(img,orgimg,pred,vis_thres = 0.6):
+    '''
+    Visualize predictions
+    vis_thres: confidence threshold for drawing a detection
+    '''
+
+    print('img.shape: ', img.shape)
+    print('orgimg.shape: ', orgimg.shape)
+
+    no_vis_nums=0
+    # Process detections
+    for i, det in enumerate(pred):  # detections per image
+        gn = torch.tensor(orgimg.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+        gn_lks = torch.tensor(orgimg.shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]]  # normalization gain landmarks
+        if len(det):
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()
+
+            # Print results
+            for c in det[:, -1].unique():
+                n = (det[:, -1] == c).sum()  # detections per class
+
+            det[:, 5:15] = scale_coords_landmarks(img.shape[2:], det[:, 5:15], orgimg.shape).round()
+
+            for j in range(det.size()[0]):
+
+                if det[j, 4].cpu().numpy() < vis_thres:
+                    no_vis_nums+=1
+                    continue
+
+                xywh = (xyxy2xywh(det[j, :4].view(1, 4)) / gn).view(-1).tolist()
+                conf = det[j, 4].cpu().numpy()
+                landmarks = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist()
+                class_num = det[j, 15].cpu().numpy()
+                orgimg = show_results(orgimg, xywh, conf, landmarks, class_num)
+
+    cv2.imwrite(cur_path+'/result.jpg', orgimg)
+    print('result saved to '+cur_path+'/result.jpg')
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--img_path', type=str, default=cur_path+"/sample.jpg", help='img path')
+    parser.add_argument('--trt_path', type=str, required=True, help='trt_path')
+    parser.add_argument('--output_shape', type=int, nargs=3, default=[1,25200,16], help='input[1,3,640,640] -> output[1,25200,16]')  # nargs=3 instead of type=list, which would split a string argument into characters
+    opt = parser.parse_args()
+
+    img,orgimg=img_process(opt.img_path)
+    model=TrtModel(opt.trt_path)
+    pred=model(img.numpy()).reshape(opt.output_shape)  # forward
+    model.destroy()
+
+    # Apply NMS
+    pred = non_max_suppression_face(torch.from_numpy(pred), conf_thres=0.3, iou_thres=0.5)
+
+    # ============ Visualization ============
+    img_vis(img,orgimg,pred)
diff --git a/torch2trt/readme.md b/torch2trt/readme.md
new file mode 100644
index 0000000..8092d2f
--- /dev/null
+++ b/torch2trt/readme.md
@@ -0,0 +1,68 @@
+English | [简体中文](readme_CN.md)
+
+# Overall process
+
+## 1. PyTorch -> TensorRT
+
+```shell
+python export.py --weights "torch's path" --onnx2trt --fp16_trt
+```
+
+## 2. TensorRT inference
+
+```shell
+python torch2trt/main.py --trt_path "trt's path"
+```
+
+Image preprocessing -> TensorRT inference -> visualization
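+
+For example, with the plate detector shipped in this repo (a minimal sketch: `weights/plate_detect.pt` comes from the top-level README, while the `.trt` output path is a placeholder whose actual name depends on export.py):
+
+```shell
+# 1. PyTorch weights -> ONNX -> serialized FP16 TensorRT engine
+python export.py --weights weights/plate_detect.pt --onnx2trt --fp16_trt
+# 2. Run the engine on the bundled sample image (torch2trt/sample.jpg is the default --img_path)
+python torch2trt/main.py --trt_path weights/plate_detect.trt
+```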
+
+# Latency comparison
+
+| Backbone | Pytorch(ms) | TensorRT_FP16(ms) |
+|:---:|:----:|:----:|
+|yolov5n-0.5| 7.7 | 2.1 |
+|yolov5n-face| 7.7 | 2.4 |
+|yolov5s-face| 5.6 | 2.2 |
+|yolov5m-face| 9.9 | 3.3 |
+|yolov5l-face| 15.9 | 4.5 |
+
+> Pytorch=1.10.0+cu102 TensorRT=8.2.0.6 Hardware=rtx2080ti
+
+```shell
+python torch2trt/speed.py --torch_path "torch's path" --trt_path "trt's path"
+```
+
+# Visualization
+
+| yolov5n-0.5 | yolov5n-face |
+|:---:|:---:|
+| ![yolov5n-0.5](imgs/yolov5n-0.5.jpg) | ![yolov5n-face](imgs/yolov5n-face.jpg) |
+
+| yolov5s-face | yolov5m-face | yolov5l-face |
+|:---:|:---:|:---:|
+| ![yolov5s-face](imgs/yolov5s-face.jpg) | ![yolov5m-face](imgs/yolov5m-face.jpg) | ![yolov5l-face](imgs/yolov5l-face.jpg) |
+
+
diff --git a/torch2trt/readme_CN.md b/torch2trt/readme_CN.md
new file mode 100644
index 0000000..04a8974
--- /dev/null
+++ b/torch2trt/readme_CN.md
@@ -0,0 +1,65 @@
+
+# Overall process
+
+## 1. PyTorch -> TensorRT
+
+```shell
+python export.py --weights "torch weights path" --onnx2trt --fp16_trt
+```
+
+## 2. TensorRT inference
+
+```shell
+python torch2trt/main.py --trt_path "trt weights path"
+```
+
+Image preprocessing -> TensorRT inference -> visualized result
+
+# Latency comparison
+
+| Backbone | Pytorch(ms) | TensorRT_FP16(ms) |
+|:---:|:----:|:----:|
+| yolov5n-0.5 | 7.7 | 2.1 |
+| yolov5n-face | 7.7 | 2.4 |
+| yolov5s-face | 5.6 | 2.2 |
+| yolov5m-face | 9.9 | 3.3 |
+| yolov5l-face | 15.9 | 4.5 |
+
+> Pytorch=1.10.0+cu102 TensorRT=8.2.0.6 Hardware=rtx2080ti
+
+```shell
+python torch2trt/speed.py --torch_path "torch weights path" --trt_path "trt weights path"
+```
+
+# Visualization
+
+| yolov5n-0.5 | yolov5n-face |
+|:---:|:---:|
+| ![yolov5n-0.5](imgs/yolov5n-0.5.jpg) | ![yolov5n-face](imgs/yolov5n-face.jpg) |
+
+| yolov5s-face | yolov5m-face | yolov5l-face |
+|:---:|:---:|:---:|
+| ![yolov5s-face](imgs/yolov5s-face.jpg) | ![yolov5m-face](imgs/yolov5m-face.jpg) | ![yolov5l-face](imgs/yolov5l-face.jpg) |
+
diff --git a/torch2trt/sample.jpg b/torch2trt/sample.jpg new file mode 100644 index 0000000..7eda4f3 Binary files /dev/null and b/torch2trt/sample.jpg differ
diff --git a/torch2trt/speed.py b/torch2trt/speed.py
new file mode 100644
index 0000000..bc95d49
--- /dev/null
+++ b/torch2trt/speed.py
@@ -0,0 +1,49 @@
+from models.experimental import attempt_load
+from torch2trt.trt_model import TrtModel
+import argparse
+import torch
+import time
+from tqdm import tqdm
+
+
+def run(model,img,warmup_iter,iter):
+
+    print('start warm up...')
+    for _ in tqdm(range(warmup_iter)):
+        model(img)
+
+    print('start calculate...')
+    torch.cuda.synchronize()
+    start = time.time()
+    for __ in tqdm(range(iter)):
+        model(img)
+    torch.cuda.synchronize()
+    end = time.time()
+    return ((end - start) * 1000)/float(iter)  # average latency in ms per iteration
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--torch_path', type=str,required=True, help='torch weights path')
+    parser.add_argument('--trt_path', type=str,required=True, help='tensorrt weights path')
+
+    parser.add_argument('--device', type=int,default=0, help='cuda device')
+    parser.add_argument('--img_shape', type=int, nargs=4, default=[1,3,640,640], help='input image shape (NCHW)')  # help text fixed; nargs=4 instead of type=list
+    parser.add_argument('--warmup_iter', type=int, default=100,help='warm up iter')
+    parser.add_argument('--iter', type=int, default=300,help='average elapsed time of iterations')
+    opt = parser.parse_args()
+
+    # -----------------------torch-----------------------------------------
+    img = torch.zeros(opt.img_shape)
+    model = attempt_load(opt.torch_path, map_location=torch.device('cpu'))  # load FP32 model
+    model.eval()
+    total_time=run(model.to(opt.device),img.to(opt.device),opt.warmup_iter,opt.iter)
+    print('Pytorch is %.2f ms/img'%total_time)
+
+    # -----------------------tensorrt-----------------------------------------
+    model=TrtModel(opt.trt_path)
+    total_time=run(model,img.numpy(),opt.warmup_iter,opt.iter)
+    model.destroy()
+    print('TensorRT is %.2f ms/img'%total_time)
diff --git a/torch2trt/trt_model.py b/torch2trt/trt_model.py
new file mode 100644
index 0000000..b6d46a9
--- /dev/null
+++ b/torch2trt/trt_model.py
@@ -0,0 +1,118 @@
+import pycuda.autoinit
+import pycuda.driver as cuda
+import tensorrt as trt
+import numpy as np
+
+EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
+TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
+def GiB(val):
+    return val * 1 << 30
+
+def ONNX_to_TRT(onnx_model_path=None,trt_engine_path=None,fp16_mode=False):
+    """
+    TensorRT V8 only.
+    Builds a CUDA engine and saves the serialized engine file (fixed input shape only).
+
+    fp16_mode: True to build an FP16 engine
+    onnx_model_path: path of the ONNX model to load
+    trt_engine_path: path to save the TRT engine file
+    """
+    builder = trt.Builder(TRT_LOGGER)
+    network = builder.create_network(EXPLICIT_BATCH)
+    parser = trt.OnnxParser(network, TRT_LOGGER)
+
+    config = builder.create_builder_config()
+    config.max_workspace_size=GiB(1)
+    if fp16_mode:
+        config.set_flag(trt.BuilderFlag.FP16)
+    with open(onnx_model_path, 'rb') as model:
+        assert parser.parse(model.read())
+    serialized_engine=builder.build_serialized_network(network, config)
+
+    with open(trt_engine_path, 'wb') as f:
+        f.write(serialized_engine)  # write the serialized engine to disk
+
+    print('TensorRT file in ' + trt_engine_path)
+    print('============ONNX->TensorRT SUCCESS============')
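+
+# Hypothetical usage sketch (the .trt path is a placeholder; the .onnx file is the one
+# from the repo's ONNX demo):
+#   ONNX_to_TRT(onnx_model_path='weights/plate_detect.onnx',
+#               trt_engine_path='weights/plate_detect.trt', fp16_mode=True)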
+
+class TrtModel():
+    '''
+    TensorRT inference wrapper
+    '''
+    def __init__(self,trt_path):
+        self.ctx=cuda.Device(0).make_context()
+        stream = cuda.Stream()
+        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
+        runtime = trt.Runtime(TRT_LOGGER)
+
+        # Deserialize the engine from file
+        with open(trt_path, "rb") as f:
+            engine = runtime.deserialize_cuda_engine(f.read())
+        context = engine.create_execution_context()
+
+        host_inputs = []
+        cuda_inputs = []
+        host_outputs = []
+        cuda_outputs = []
+        bindings = []
+
+        for binding in engine:
+            print('binding:', binding, engine.get_binding_shape(binding))
+            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
+            dtype = trt.nptype(engine.get_binding_dtype(binding))
+            # Allocate host and device buffers
+            host_mem = cuda.pagelocked_empty(size, dtype)
+            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
+            # Append the device buffer to device bindings.
+            bindings.append(int(cuda_mem))
+            # Append to the appropriate list.
+            if engine.binding_is_input(binding):
+                self.input_w = engine.get_binding_shape(binding)[-1]
+                self.input_h = engine.get_binding_shape(binding)[-2]
+                host_inputs.append(host_mem)
+                cuda_inputs.append(cuda_mem)
+            else:
+                host_outputs.append(host_mem)
+                cuda_outputs.append(cuda_mem)
+
+        # Store
+        self.stream = stream
+        self.context = context
+        self.engine = engine
+        self.host_inputs = host_inputs
+        self.cuda_inputs = cuda_inputs
+        self.host_outputs = host_outputs
+        self.cuda_outputs = cuda_outputs
+        self.bindings = bindings
+        self.batch_size = engine.max_batch_size
+
+    def __call__(self,img_np_nchw):
+        '''
+        TensorRT inference
+        :param img_np_nchw: input image as an NCHW numpy array
+        '''
+        self.ctx.push()
+
+        # Restore
+        stream = self.stream
+        context = self.context
+        engine = self.engine
+        host_inputs = self.host_inputs
+        cuda_inputs = self.cuda_inputs
+        host_outputs = self.host_outputs
+        cuda_outputs = self.cuda_outputs
+        bindings = self.bindings
+
+        np.copyto(host_inputs[0], img_np_nchw.ravel())
+        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
+        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
+        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
+        stream.synchronize()
+        self.ctx.pop()
+        return host_outputs[0]
+
+    def destroy(self):
+        # Remove any context from the top of the context stack, deactivating it.
+ self.ctx.pop() diff --git a/train.py b/train.py new file mode 100644 index 0000000..7eb5560 --- /dev/null +++ b/train.py @@ -0,0 +1,602 @@ +import argparse +import logging +import math +import os +import random +import time +from pathlib import Path +from threading import Thread +from warnings import warn + +import numpy as np +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +import torch.utils.data +import yaml +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from tqdm import tqdm + +import test # import test.py to get mAP after each epoch +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.face_datasets import create_dataloader +from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ + fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + print_mutation, set_logging +from utils.google_utils import attempt_download +from utils.loss import compute_loss +from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first + +logger = logging.getLogger(__name__) +begin_save=1 +try: + import wandb +except ImportError: + wandb = None + logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)") + + +def train(hyp, opt, device, tb_writer=None, wandb=None): + logger.info(f'Hyperparameters {hyp}') + save_dir, epochs, batch_size, total_batch_size, weights, rank = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last = wdir / 'last.pt' + best = wdir / 'best.pt' + results_file = save_dir / 'results.txt' + + # Save run settings + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.dump(vars(opt), f, sort_keys=False) + + # Configure + plots = not opt.evolve # create plots + cuda = device.type != 'cpu' + init_seeds(2 + rank) + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + + # Model + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(rank): + attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location=device) # load checkpoint + if hyp.get('anchors'): + ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create + exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = intersect_dicts(state_dict, 
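intersect_dicts (from utils.torch_utils) is what makes the transfer-learning load above safe: only checkpoint tensors whose key exists in the freshly built model with an identical shape survive, so a head resized for a different nc is silently skipped and load_state_dict(strict=False) fills the rest. A self-contained sketch of the idea (helper name and shapes are illustrative, not the repo's exact code):

```
import torch

def intersect_dicts_sketch(da, db, exclude=()):
    # keep entries present in both dicts with matching shapes, minus excluded keys
    return {k: v for k, v in da.items()
            if k in db and v.shape == db[k].shape and not any(x in k for x in exclude)}

ckpt  = {'conv.weight': torch.zeros(16, 3, 3, 3), 'head.weight': torch.zeros(85, 64)}
model = {'conv.weight': torch.zeros(16, 3, 3, 3), 'head.weight': torch.zeros(21, 64)}
print(list(intersect_dicts_sketch(ckpt, model)))  # ['conv.weight'], mismatched head is dropped
```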
model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(state_dict, strict=False) # load + logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + else: + model = Model(opt.cfg, ch=3, nc=nc).to(device) # create + + # Freeze + freeze = [] # parameter names to freeze (full or partial) + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print('freezing %s' % k) + v.requires_grad = False + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + + pg0, pg1, pg2 = [], [], [] # optimizer parameter groups + for k, v in model.named_modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + pg2.append(v.bias) # biases + if isinstance(v, nn.BatchNorm2d): + pg0.append(v.weight) # no decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + pg1.append(v.weight) # apply decay + + if opt.adam: + optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + else: + optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + + optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay + optimizer.add_param_group({'params': pg2}) # add pg2 (biases) + logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) + del pg0, pg1, pg2 + + # Scheduler https://arxiv.org/pdf/1812.01187.pdf + # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR + lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # plot_lr_scheduler(optimizer, scheduler, epochs) + + # Logging + if wandb and wandb.run is None: + opt.hyp = hyp # add hyperparameters + wandb_run = wandb.init(config=opt, resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=save_dir.stem, + id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) + loggers = {'wandb': wandb} # loggers dict + + # Resume + start_epoch, best_fitness = 0, 0.0 + if pretrained: + # Optimizer + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) + best_fitness = 0 + + # Results + if ckpt.get('training_results') is not None: + with open(results_file, 'w') as file: + file.write(ckpt['training_results']) # write results.txt + + # Epochs + start_epoch = ckpt['epoch'] + 1 + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
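The lf lambda above is the cosine schedule: the learning-rate multiplier decays smoothly from 1.0 at epoch 0 to hyp['lrf'] at the last epoch. A quick standalone check with illustrative values:

```
import math

epochs, lrf = 120, 0.2  # illustrative values, not the shipped hyps
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf

for e in (0, 30, 60, 90, 119):
    print(f'epoch {e:3d}: lr multiplier = {lf(e):.3f}')
# falls smoothly from 1.000 toward 0.200 by the final epoch
```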
% + (weights, ckpt['epoch'], epochs)) + epochs += ckpt['epoch'] # finetune additional epochs + + del ckpt, state_dict + + # Image sizes + gs = int(max(model.stride)) # grid size (max stride) + imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + + # DP mode + if cuda and rank == -1 and torch.cuda.device_count() > 1: + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and rank != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + logger.info('Using SyncBatchNorm()') + + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) + + # Trainloader + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, + world_size=opt.world_size, workers=opt.workers, + image_weights=opt.image_weights) + mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + nb = len(dataloader) # number of batches + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + + # Process 0 + if rank in [-1, 0]: + ema.updates = start_epoch * nb // accumulate # set EMA updates + testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader + hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, + rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0] + + if not opt.resume: + labels = np.concatenate(dataset.labels, 0) + c = torch.tensor(labels[:, 0]) # classes + # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency + # model._initialize_biases(cf.to(device)) + if plots: + plot_labels(labels, save_dir, loggers) + if tb_writer: + tb_writer.add_histogram('classes', c, 0) + + # Anchors + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + + # Model parameters + hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + logger.info('Image sizes %g train, %g test\n' + 'Using %g dataloader workers\nLogging results to %s\n' + 'Starting training for %g epochs...' 
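check_img_size (imported from utils.general) is what guarantees imgsz and imgsz_test are multiples of the model's maximum stride gs; functionally it amounts to the following sketch (not the repo's exact code):

```
import math

def check_img_size_sketch(img_size, s=32):
    # round up to the nearest multiple of the maximum stride s
    new_size = math.ceil(img_size / s) * s
    if new_size != img_size:
        print(f'WARNING: --img-size {img_size} must be a multiple of max stride {s}, updating to {new_size}')
    return new_size

print(check_img_size_sketch(640))  # 640, already a multiple of 32
print(check_img_size_sketch(650))  # 672 after rounding up
```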
% (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs)) + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + model.train() + + # Update image weights (optional) + if opt.image_weights: + # Generate indices + if rank in [-1, 0]: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + # Broadcast if DDP + if rank != -1: + indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + dist.broadcast(indices, 0) + if rank != 0: + dataset.indices = indices.cpu().numpy() + + # Update mosaic border + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(5, device=device) # mean losses + if rank != -1: + dataloader.sampler.set_epoch(epoch) + pbar = enumerate(dataloader) + logger.info(('\n' + '%10s' * 9) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'landmark', 'total', 'targets', 'img_size')) + if rank in [-1, 0]: + pbar = tqdm(pbar, total=nb) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with amp.autocast(enabled=cuda): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size + if rank != -1: + loss *= opt.world_size # gradient averaged between devices in DDP mode + + # Backward + scaler.scale(loss).backward() + + # Optimize + if ni % accumulate == 0: + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + # Print + if rank in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + s = ('%10s' * 2 + '%10.4g' * 7) % ( + '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + pbar.set_description(s) + + # Plot + if plots and ni < 3: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + # if tb_writer: + # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + # 
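The inner loop above implements AMP training with gradient accumulation: the scaled loss is back-propagated every batch, but the optimizer only steps every `accumulate` batches, emulating the nominal batch size nbs=64 on smaller GPUs. The pattern in isolation, with a toy model so it runs on CPU:

```
import torch
from torch.cuda import amp

model = torch.nn.Linear(10, 1)                 # toy stand-in for the detector
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = amp.GradScaler(enabled=False)         # enabled=True when training on CUDA
accumulate = 2                                 # step every `accumulate` batches

for ni in range(4):
    x, y = torch.randn(8, 10), torch.randn(8, 1)
    with amp.autocast(enabled=False):          # enabled=True on CUDA
        loss = torch.nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()              # gradients keep accumulating
    if ni % accumulate == 0:
        scaler.step(optimizer)                 # unscale, then optimizer.step()
        scaler.update()
        optimizer.zero_grad()
```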
tb_writer.add_graph(model, imgs) # add model to tensorboard + elif plots and ni == 3 and wandb: + wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]}) + + # end batch ------------------------------------------------------------------------------------------------ + # end epoch ---------------------------------------------------------------------------------------------------- + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + scheduler.step() + + # DDP process 0 or single-GPU + if rank in [-1, 0] and epoch > begin_save: + # mAP + if ema: + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + final_epoch = epoch + 1 == epochs + if not opt.notest or final_epoch: # Calculate mAP + results, maps, times = test.test(opt.data, + batch_size=total_batch_size, + imgsz=imgsz_test, + model=ema.ema, + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + plots=False, + log_imgs=opt.log_imgs if wandb else 0) + + # Write + with open(results_file, 'a') as f: + f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + if len(opt.name) and opt.bucket: + os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) + + # Log + tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params + for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): + if tb_writer: + tb_writer.add_scalar(tag, x, epoch) # tensorboard + if wandb: + wandb.log({tag: x}) # W&B + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + if fi > best_fitness: + best_fitness = fi + + # Save model + save = (not opt.nosave) or (final_epoch and not opt.evolve) + if save: + with open(results_file, 'r') as f: # create checkpoint + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': f.read(), + 'model': ema.ema, + 'optimizer': None if final_epoch else optimizer.state_dict(), + 'wandb_id': wandb_run.id if wandb else None} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + ckpt_best = { + 'epoch': epoch, + 'best_fitness': best_fitness, + # 'training_results': f.read(), + 'model': ema.ema, + # 'optimizer': None if final_epoch else optimizer.state_dict(), + # 'wandb_id': wandb_run.id if wandb else None + } + torch.save(ckpt_best, best) + del ckpt + # end epoch ---------------------------------------------------------------------------------------------------- + # end training + + if rank in [-1, 0]: + # Strip optimizers + final = best if best.exists() else last # final model + for f in [last, best]: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + + # Plots + if plots: + plot_results(save_dir=save_dir) # save as results.png + if wandb: + files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png'] + wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) + if opt.log_artifacts: + wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) + + # Test best.pt + logger.info('%g epochs completed in %.3f hours.\n' % (epoch - 
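fitness() collapses (P, R, mAP@.5, mAP@.5:.95) into the single scalar used to pick best.pt. In stock YOLOv5 it is a weighted sum dominated by mAP@.5:.95; a sketch under that assumption:

```
import numpy as np

def fitness_sketch(x):
    # weighted combination of [P, R, mAP@.5, mAP@.5:.95]
    w = [0.0, 0.0, 0.1, 0.9]  # stock YOLOv5 weighting (assumed here)
    return (x[:, :4] * w).sum(1)

results = np.array([[0.92, 0.88, 0.90, 0.65]])  # illustrative epoch metrics
print(fitness_sketch(results))                  # [0.675]
```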
start_epoch + 1, (time.time() - t0) / 3600)) + if opt.data.endswith('coco.yaml') and nc == 80: # if COCO + for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=total_batch_size, + imgsz=imgsz_test, + conf_thres=conf, + iou_thres=iou, + model=attempt_load(final, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=save_json, + plots=False) + + else: + dist.destroy_process_group() + + wandb.run.finish() if wandb and wandb.run else None + torch.cuda.empty_cache() + return results + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='weights/plate_detect.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='models/yolov5n-0.5.yaml', help='model.yaml path') + parser.add_argument('--data', type=str, default='data/widerface.yaml', help='data.yaml path') + parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=120) + parser.add_argument('--batch-size', type=int, default=32, help='total batch size for all GPUs') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--notest', action='store_true', help='only test final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', default=True, help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') + parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. 
final trained model') + parser.add_argument('--workers', type=int, default=4, help='maximum number of dataloader workers') + parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + opt = parser.parse_args() + + # Set DDP variables + opt.total_batch_size = opt.batch_size + opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + set_logging(opt.global_rank) + # if opt.global_rank in [-1, 0]: + # check_git_status() + + # Resume + if opt.resume: # resume an interrupted run + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' + with open(Path(ckpt).parent.parent / 'opt.yaml') as f: + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace + opt.cfg, opt.weights, opt.resume = '', ckpt, True + logger.info('Resuming training from %s' % ckpt) + else: + # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) + opt.name = 'evolve' if opt.evolve else opt.name + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if opt.local_rank != -1: + assert torch.cuda.device_count() > opt.local_rank + torch.cuda.set_device(opt.local_rank) + device = torch.device('cuda', opt.local_rank) + dist.init_process_group(backend='nccl', init_method='env://') # distributed backend + assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + opt.batch_size = opt.total_batch_size // opt.world_size + + # Hyperparameters + with open(opt.hyp) as f: + hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps + if 'box' not in hyp: + warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' % + (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120')) + hyp['box'] = hyp.pop('giou') + + # Train + logger.info(opt) + if not opt.evolve: + tb_writer = None # init loggers + if opt.global_rank in [-1, 0]: + logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') + tb_writer = SummaryWriter(opt.save_dir) # Tensorboard + train(hyp, opt, device, tb_writer, wandb) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss 
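With the argument parsing and DDP plumbing above, typical launches look like the following; dataset and config paths are the parser defaults, and the two-GPU command mirrors the torch.distributed.launch form used by utils/aws/resume.py later in this commit:

```
# single GPU
python train.py --data data/widerface.yaml --cfg models/yolov5n-0.5.yaml \
                --weights weights/plate_detect.pt --epochs 120 --batch-size 32 --device 0

# two-GPU DDP: --batch-size is split across WORLD_SIZE processes
python -m torch.distributed.launch --nproc_per_node 2 train.py \
                --data data/widerface.yaml --batch-size 64 --device 0,1 --sync-bn
```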
gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + + assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' + opt.notest, opt.nosave = True, True # only test/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + if opt.bucket: + os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists + + for _ in range(300): # generations to evolve + if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt('evolve.txt', ndmin=2) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() # weights + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([x[0] for x in meta.values()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, wandb=wandb) + + # Write mutation results + print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + + # Plot results + plot_evolution(yaml_file) + print(f'Hyperparameter evolution complete. 
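The evolve branch mutates hyperparameters multiplicatively: each key's gain g gates whether it can move at all, mp is the per-key mutation probability, and every factor is clipped to [0.3, 3.0]. The core of that scheme, runnable on its own:

```
import numpy as np

npr = np.random
g = np.array([1.0, 1.0, 0.3])   # per-hyperparameter gains (0 would freeze a key)
mp, s = 0.8, 0.2                # mutation probability, sigma

v = np.ones(len(g))
while all(v == 1):              # retry until at least one factor actually changes
    v = (g * (npr.random(len(g)) < mp) * npr.randn(len(g)) * npr.random() * s + 1).clip(0.3, 3.0)
print(v)                        # multiplicative factors applied to the parent hyps
```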
Best results saved as: {yaml_file}\n' + f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/activations.py b/utils/activations.py new file mode 100644 index 0000000..aa3ddf0 --- /dev/null +++ b/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX + + +class MemoryEfficientSwish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x * torch.sigmoid(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + return grad_output * (sx * (1 + x * (1 - sx))) + + def forward(self, x): + return self.F.apply(x) + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000..5dba9f1 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,155 @@ +# Auto-anchor utils + +import numpy as np +import torch +import yaml +from scipy.cluster.vq import kmeans +from tqdm import tqdm + +from utils.general import colorstr + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchor_grid.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + print('Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + m.anchor_grid[:] = m.anchor_grid.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + prefix = colorstr('autoanchor: ') + print(f'\n{prefix}Analyzing anchors... 
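These activation classes re-implement the built-ins with primitive ops so TorchScript/ONNX/CoreML exporters can trace them; numerically they should agree with nn.SiLU and nn.Hardswish, which is easy to sanity-check:

```
import torch
import torch.nn as nn
from utils.activations import SiLU, Hardswish

x = torch.randn(1000)
print(torch.allclose(SiLU()(x), nn.SiLU()(x), atol=1e-6))            # True
print(torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6))  # True
```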
', end='') + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1. / thr).float().mean() # best possible recall + return bpr, aat + + bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) + print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') + if bpr < 0.98: # threshold to recompute + print('. Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + new_bpr = metric(new_anchors.reshape(-1, 2))[0] + if new_bpr > bpr: # replace anchors + new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + check_anchor_order(m) + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. / thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. 
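The metric inside check_anchors measures, for every label wh, how well its best anchor matches in the worst dimension; BPR is the fraction of labels whose best anchor lies within the thr=4 ratio band. A toy run of the same arithmetic with made-up sizes:

```
import torch

wh = torch.tensor([[30., 60.], [100., 50.], [400., 380.]])  # illustrative label sizes
k = torch.tensor([[32., 64.], [128., 96.]])                 # illustrative anchors
thr = 4.0

r = wh[:, None] / k[None]               # label-to-anchor ratios
x = torch.min(r, 1. / r).min(2)[0]      # worst-dimension match per anchor
best = x.max(1)[0]                      # best anchor per label
bpr = (best > 1. / thr).float().mean()  # best possible recall
print(f'BPR = {bpr:.2f}')
```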
/ r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every 
instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 0000000..faad8d2 --- /dev/null +++ b/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh new file mode 100644 index 0000000..890606b --- /dev/null +++ b/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour'
+  while IFS= read -r id; do
+    ((i++))
+    echo "restarting container $i: $id"
+    sudo docker start $id
+    # sudo docker exec -it $id python train.py --resume # single-GPU
+    sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
+  done <<<"$list"
+fi
diff --git a/utils/cv_puttext.py b/utils/cv_puttext.py
new file mode 100644
index 0000000..cc88ab8
--- /dev/null
+++ b/utils/cv_puttext.py
@@ -0,0 +1,22 @@
+import cv2
+import numpy as np
+from PIL import Image, ImageDraw, ImageFont
+
+def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
+    if (isinstance(img, np.ndarray)):  # check whether img is an OpenCV image (numpy array)
+        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+    draw = ImageDraw.Draw(img)
+    fontText = ImageFont.truetype(
+        "fonts/platech.ttf", textSize, encoding="utf-8")
+    draw.text((left, top), text, textColor, font=fontText)
+    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
+
+if __name__ == '__main__':
+    imgPath = "result.jpg"
+    img = cv2.imread(imgPath)
+
+    saveImg = cv2ImgAddText(img, '中国加油!', 50, 100, (255, 0, 0), 50)
+
+    # cv2.imshow('display',saveImg)
+    cv2.imwrite('save.jpg', saveImg)
+    # cv2.waitKey()
\ No newline at end of file
diff --git a/utils/datasets.py b/utils/datasets.py
new file mode 100644
index 0000000..feb5dc1
--- /dev/null
+++ b/utils/datasets.py
@@ -0,0 +1,1019 @@
+# Dataset utils and dataloaders
+
+import glob
+import logging
+import math
+import os
+import random
+import shutil
+import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+from threading import Thread
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from PIL import Image, ExifTags
+from torch.utils.data import Dataset
+from tqdm import tqdm
+
+from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str
+from utils.torch_utils import torch_distributed_zero_first
+
+# Parameters
+help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']  # acceptable image suffixes
+vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
+logger = logging.getLogger(__name__)
+
+# Get orientation exif tag
+for orientation in ExifTags.TAGS.keys():
+    if ExifTags.TAGS[orientation] == 'Orientation':
+        break
+
+
+def get_hash(files):
+    # Returns a single hash value of a list of files
+    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
+
+
+def exif_size(img):
+    # Returns exif-corrected PIL size
+    s = img.size  # (width, height)
+    try:
+        rotation = dict(img._getexif().items())[orientation]
+        if rotation == 6:  # rotation 270
+            s = (s[1], s[0])
+        elif rotation == 8:  # rotation 90
+            s = (s[1], s[0])
+    except:
+        pass
+
+    return s
+
+
+def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
+                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
+    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
+    with torch_distributed_zero_first(rank):
+        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
+                                      augment=augment,  # augment images
+                                      hyp=hyp,  # augmentation hyperparameters
+                                      rect=rect,  # rectangular training
+                                      cache_images=cache,
+                                      single_cls=opt.single_cls,
+                                      stride=int(stride),
+                                      pad=pad,
+                                      image_weights=image_weights,
+                                      prefix=prefix)
+
+    batch_size = min(batch_size, len(dataset))
+    nw =
min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640): + p = str(Path(path)) # os-agnostic + p = os.path.abspath(p) # absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
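InfiniteDataLoader keeps its worker processes alive across epochs by re-yielding from one persistent iterator instead of tearing workers down at every StopIteration; epoch length still comes from __len__. A minimal usage sketch over a toy dataset (in training it is built around LoadImagesAndLabels by create_dataloader):

```
import torch
from torch.utils.data import TensorDataset

ds = TensorDataset(torch.arange(8).float())          # toy stand-in dataset
loader = InfiniteDataLoader(ds, batch_size=4, num_workers=0)

for epoch in range(2):
    for (batch,) in loader:                          # len(loader) batches per epoch
        print(epoch, batch.tolist())                 # workers are reused between epochs
```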
' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, new_shape=self.img_size)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640): + self.img_size = img_size + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, new_shape=self.img_size)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640): + self.mode = 'stream' + self.img_size = img_size + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): + # Start the thread to read frames from the video stream + print(f'{i + 1}/{n}: {s}... 
', end='') + cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) % 100 + _, self.imgs[i] = cap.read() # guarantee first frame + thread = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f' success ({w}x{h} at {fps:.2f} FPS).') + thread.start() + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + + def update(self, index, cap): + # Read next stream frame in a daemon thread + n = 0 + while cap.isOpened(): + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n == 4: # read every 4th frame + _, self.imgs[index] = cap.retrieve() + n = 0 + time.sleep(0.01) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + img0 = self.imgs.copy() + if cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return [x.replace(sa, sb, 1).replace('.' 
+ x.split('.')[-1], '.txt') for x in img_paths] + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache = torch.load(cache_path) # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed + cache = self.cache_labels(cache_path, prefix) # re-cache + else: + cache = self.cache_labels(cache_path, prefix) # cache + + # Display cache + [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total + desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + desc, total=n, initial=n) + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
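img2label_paths encodes the dataset layout convention used throughout: the label file sits beside the image, with /images/ swapped for /labels/ and the image suffix replaced by .txt. For example (path is illustrative; the function is os.sep-aware on Windows):

```
print(img2label_paths(['/data/plates/images/train/0001.jpg']))
# ['/data/plates/labels/train/0001.txt']
```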
See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + labels, shapes = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape] + except Exception as e: + nc += 1 + print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + + if nf == 0: + print(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = [nf, nm, ne, nc, i + 1] + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., 
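The MixUp branch in __getitem__ blends two mosaics with a Beta(8, 8) ratio, which concentrates near 0.5 so both source images stay clearly visible. The blend in isolation:

```
import numpy as np

img = np.full((4, 4, 3), 200, dtype=np.uint8)   # stand-ins for the two mosaics
img2 = np.full((4, 4, 3), 40, dtype=np.uint8)

r = np.random.beta(8.0, 8.0)                    # mixup ratio, alpha=beta=8.0
mixed = (img * r + img2 * (1 - r)).astype(np.uint8)
print(round(r, 2), mixed[0, 0])                 # r near 0.5 gives pixels near 120
```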
mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + # Histogram equalization + # if random.random() < 0.2: + # for i in range(3): + # img[:, :, i] = cv2.equalizeHist(img[:, :, i]) + + +def load_mosaic(self, index): + # loads images in a 4-mosaic + + labels4 = [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels = self.labels[index].copy() + if labels.size: + labels[:, 1:] 
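
# [Editor's usage example, not from the commit] augment_hsv() above jitters an
# image in place through three 256-entry lookup tables, one per HSV channel.
# The flat BGR test image below is a stand-in.
import numpy as np

test = np.zeros((64, 64, 3), dtype=np.uint8)
test[:] = (40, 80, 160)                                 # arbitrary BGR colour
before = test.copy()
augment_hsv(test, hgain=0.015, sgain=0.7, vgain=0.4)    # modifies `test` in place
print('pixels changed:', int((test != before).sum()))   # usually non-zero
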
= xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + labels4.append(labels) + + # Concat/clip labels + if len(labels4): + labels4 = np.concatenate(labels4, 0) + np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4 = random_perspective(img4, labels4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9 = [] + s = self.img_size + indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels = self.labels[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + labels9.append(labels) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + if len(labels9): + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + + np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, 
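
# [Editor's worked example, not from the commit] Tile placement for the top-left
# slot of the 4-mosaic above, with s=640, mosaic centre (xc, yc)=(700, 600) and a
# 480x640 (h x w) source image -- all made-up numbers.
s, xc, yc, h, w = 640, 700, 600, 480, 640
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc      # region on the 2s x 2s canvas
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h      # matching crop of the source
assert (x2a - x1a, y2a - y1a) == (x2b - x1b, y2b - y1b)          # equal sizes by construction
print((x1a, y1a, x2a, y2a), (x1b, y1b, x2b, y2b))
# -> (60, 120, 700, 600) (0, 0, 640, 480): the whole image fits above-left of the centre;
#    padw = x1a - x1b = 60 and padh = y1a - y1b = 120 then shift its labels accordingly.
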
scaleFill=False, scaleup=True): + # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + # warp points + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ 
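
# [Editor's worked example, not from the commit] The statement this lands in warps
# box corners through M in homogeneous coordinates; for a pure translation the
# corners just shift, which makes the matrix form easy to sanity-check.
import numpy as np

M = np.eye(3)
M[0, 2], M[1, 2] = 10, 5                             # translate +10 px in x, +5 px in y
xy = np.ones((4, 3))
xy[:, :2] = [[0, 0], [100, 50], [0, 50], [100, 0]]   # x1y1, x2y2, x1y2, x2y1
print((xy @ M.T)[:, :2])                             # -> (10,5), (110,55), (10,55), (110,5)
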
M.T # transform + if perspective: + xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale + else: # affine + xy = xy[:, :2].reshape(n, 8) + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # # apply angle-based reduction of bounding boxes + # radians = a * math.pi / 180 + # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 + # x = (xy[:, 2] + xy[:, 0]) / 2 + # y = (xy[:, 3] + xy[:, 1]) / 2 + # w = (xy[:, 2] - xy[:, 0]) * reduction + # h = (xy[:, 3] - xy[:, 1]) * reduction + # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T + + # clip boxes + xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) + xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) + targets = targets[i] + targets[:, 1:5] = xy[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco128/'): # from utils.datasets import *; 
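
# [Editor's usage example, not from the commit] box_candidates() above rejects
# boxes that the augmentation squeezed too small or too thin.
import numpy as np

before = np.array([[0., 0., 100., 100.]]).T    # 4 x n, boxes before augment
after = np.array([[0., 0., 1., 80.]]).T        # 1 px wide after augment
print(box_candidates(before, after))           # -> [False]: fails the 2 px width threshold
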
extract_boxes('../coco128')
+    # Convert detection dataset into classification dataset, with one directory per class
+
+    path = Path(path)  # images dir
+    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
+    files = list(path.rglob('*.*'))
+    n = len(files)  # number of files
+    for im_file in tqdm(files, total=n):
+        if im_file.suffix[1:] in img_formats:
+            # image
+            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
+            h, w = im.shape[:2]
+
+            # labels
+            lb_file = Path(img2label_paths([str(im_file)])[0])
+            if Path(lb_file).exists():
+                with open(lb_file, 'r') as f:
+                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
+
+                for j, x in enumerate(lb):
+                    c = int(x[0])  # class
+                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
+                    if not f.parent.is_dir():
+                        f.parent.mkdir(parents=True)
+
+                    b = x[1:] * [w, h, w, h]  # box
+                    # b[2:] = b[2:].max()  # rectangle to square
+                    b[2:] = b[2:] * 1.2 + 3  # pad
+                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int_)  # np.int is deprecated
+
+                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
+                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
+                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
+
+
+def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)):  # from utils.datasets import *; autosplit('../coco128')
+    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
+    # Arguments
+        path:     Path to images directory
+        weights:  Train, val, test weights (list)
+    """
+    path = Path(path)  # images dir
+    files = list(path.rglob('*.*'))
+    n = len(files)  # number of files
+    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
+    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
+    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing
+    for i, img in tqdm(zip(indices, files), total=n):
+        if img.suffix[1:] in img_formats:
+            with open(path / txt[i], 'a') as f:
+                f.write(str(img) + '\n')  # add image to txt file
diff --git a/utils/face_datasets.py b/utils/face_datasets.py
new file mode 100644
index 0000000..b702b55
--- /dev/null
+++ b/utils/face_datasets.py
@@ -0,0 +1,845 @@
+import glob
+import logging
+import math
+import os
+import random
+import shutil
+import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+from threading import Thread
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image, ExifTags
+from torch.utils.data import Dataset
+from tqdm import tqdm
+
+from utils.general import xyxy2xywh, xywh2xyxy, clean_str
+from utils.torch_utils import torch_distributed_zero_first
+
+
+# Parameters
+help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']  # acceptable image suffixes
+vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
+logger = logging.getLogger(__name__)
+
+# Get orientation exif tag
+for orientation in ExifTags.TAGS.keys():
+    if ExifTags.TAGS[orientation] == 'Orientation':
+        break
+
+def get_hash(files):
+    # Returns a single hash value of a list of files
+    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
+
+def img2label_paths(img_paths):
+    # Define label paths as a function of image paths
+    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ 
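
# [Editor's sketch, not from the commit; file names hypothetical] autosplit()
# above assigns every image to a split by one weighted random draw per file.
import random

random.seed(0)
files = [f'img_{i}.jpg' for i in range(10)]
idx = random.choices([0, 1, 2], weights=(0.9, 0.1, 0.0), k=len(files))
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']
for i, name in zip(idx, files):
    print(txt[i], name)                        # which list each image would land in
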
substrings + return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths] + + # return [x.replace(".jpg",".txt") for x in img_paths] + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadFaceImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + ) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadFaceImagesAndLabels.collate_fn4 if quad else LoadFaceImagesAndLabels.collate_fn) + return dataloader, dataset +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + +class LoadFaceImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + else: + raise Exception('%s does 
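
# [Editor's worked example, not from the commit; path hypothetical] On a POSIX
# system, img2label_paths() above swaps the first /images/ path component for
# /labels/ and the image suffix for .txt:
print(img2label_paths(['/data/plates/images/train/0001.jpg']))
# -> ['/data/plates/labels/train/0001.txt']
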
not exist' % p) + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + assert self.img_files, 'No images found' + except Exception as e: + raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url)) + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache = torch.load(cache_path) # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed + cache = self.cache_labels(cache_path) # re-cache + else: + cache = self.cache_labels(cache_path) # cache + + # Display cache + [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total + desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=desc, total=n, initial=n) + assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + labels, shapes = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int_) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int_) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) + + def cache_labels(self, path=Path('./labels.cache')): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as 
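
# [Editor's sketch, not from the commit] The cache check above relies on a cheap
# content "hash": the summed byte size of every image and label file (get_hash()).
# Any file added, removed or resized changes the sum and forces a re-scan:
#     if cache['hash'] != get_hash(self.label_files + self.img_files):
#         cache = self.cache_labels(cache_path)   # dataset changed -> rebuild
import os

def size_hash(files):                          # same idea as get_hash() above
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))

print(size_hash([]))                           # 0 for an empty list; real calls pass image + label paths
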
f: + l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + if len(l): + assert l.shape[1] == 13, 'labels require 13 columns each' + assert (l >= -1).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 13), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 13), dtype=np.float32) + x[im_file] = [l, shape] + except Exception as e: + nc += 1 + print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e)) + + pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + + if nf == 0: + print(f'WARNING: No labels found in {path}. See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = [nf, nm, ne, nc, i + 1] + torch.save(x, path) # save for next time + logging.info(f"New cache created: {path}") + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic_face(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic_face(self, random.randint(0, self.n - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + # Load labels + labels = [] + x = self.labels[index] + if x.size > 0: + # Normalized xywh to pixel xyxy format + labels = x.copy() + labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width + labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height + labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] + labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] + + #labels[:, 5] = ratio[0] * w * x[:, 5] + pad[0] # pad width + labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 5] + pad[0]) + ( + np.array(x[:, 5] > 0, dtype=np.int32) - 1) + labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 6] + pad[1]) + ( + np.array(x[:, 6] > 0, dtype=np.int32) - 1) + labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 7] + pad[0]) + ( + np.array(x[:, 7] > 0, dtype=np.int32) - 1) + labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 8] + pad[1]) + ( + np.array(x[:, 8] > 0, dtype=np.int32) - 1) + labels[:, 9] = np.array(x[:, 5] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 9] + pad[0]) + ( + np.array(x[:, 9] > 0, dtype=np.int32) - 1) + labels[:, 10] = np.array(x[:, 5] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 10] + pad[1]) + ( + np.array(x[:, 
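
# [Editor's note and example, not from the commit] Each face/plate label row has
# 13 columns: [cls, cx, cy, w, h, x1, y1, ..., x4, y4] -- a normalized box plus
# four normalized corner landmarks, with -1 marking a missing coordinate (hence
# the `(l >= -1).all()` check above).
import numpy as np

row = np.array([0, 0.5, 0.5, 0.4, 0.2,          # class + box
                0.3, 0.4, 0.7, 0.4,             # top-left, top-right corners
                0.7, 0.6, 0.3, 0.6],            # bottom-right, bottom-left corners
               dtype=np.float32)
assert row.shape[0] == 13                       # matches `l.shape[1] == 13` above
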
10] > 0, dtype=np.int32) - 1)
+                labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 11] + pad[0]) + (
+                    np.array(x[:, 11] > 0, dtype=np.int32) - 1)
+                labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 12] + pad[1]) + (
+                    np.array(x[:, 12] > 0, dtype=np.int32) - 1)
+                # labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (ratio[0] * w * x[:, 13] + pad[0]) + (
+                #     np.array(x[:, 13] > 0, dtype=np.int32) - 1)
+                # labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (ratio[1] * h * x[:, 14] + pad[1]) + (
+                #     np.array(x[:, 14] > 0, dtype=np.int32) - 1)
+
+        if self.augment:
+            # Augment imagespace
+            if not mosaic:
+                img, labels = random_perspective(img, labels,
+                                                 degrees=hyp['degrees'],
+                                                 translate=hyp['translate'],
+                                                 scale=hyp['scale'],
+                                                 shear=hyp['shear'],
+                                                 perspective=hyp['perspective'])
+
+            # Augment colorspace
+            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
+
+            # Apply cutouts
+            # if random.random() < 0.9:
+            #     labels = cutout(img, labels)
+
+        nL = len(labels)  # number of labels
+        if nL:
+            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
+            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
+            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1
+
+            labels[:, [5, 7, 9, 11]] /= img.shape[1]  # normalized landmark x 0-1
+            labels[:, [5, 7, 9, 11]] = np.where(labels[:, [5, 7, 9, 11]] < 0, -1, labels[:, [5, 7, 9, 11]])
+            labels[:, [6, 8, 10, 12]] /= img.shape[0]  # normalized landmark y 0-1
+            labels[:, [6, 8, 10, 12]] = np.where(labels[:, [6, 8, 10, 12]] < 0, -1, labels[:, [6, 8, 10, 12]])
+
+        if self.augment:
+            # flip up-down
+            if random.random() < hyp['flipud']:
+                img = np.flipud(img)
+                if nL:
+                    labels[:, 2] = 1 - labels[:, 2]
+
+                    labels[:, 6] = np.where(labels[:, 6] < 0, -1, 1 - labels[:, 6])
+                    labels[:, 8] = np.where(labels[:, 8] < 0, -1, 1 - labels[:, 8])
+                    labels[:, 10] = np.where(labels[:, 10] < 0, -1, 1 - labels[:, 10])
+                    labels[:, 12] = np.where(labels[:, 12] < 0, -1, 1 - labels[:, 12])
+                    # labels[:, 14] = np.where(labels[:, 14] < 0, -1, 1 - labels[:, 14])
+
+            # flip left-right
+            if random.random() < hyp['fliplr']:
+                img = np.fliplr(img)
+                if nL:
+                    labels[:, 1] = 1 - labels[:, 1]
+
+                    labels[:, 5] = np.where(labels[:, 5] < 0, -1, 1 - labels[:, 5])
+                    labels[:, 7] = np.where(labels[:, 7] < 0, -1, 1 - labels[:, 7])
+                    labels[:, 9] = np.where(labels[:, 9] < 0, -1, 1 - labels[:, 9])
+                    labels[:, 11] = np.where(labels[:, 11] < 0, -1, 1 - labels[:, 11])
+                    # labels[:, 13] = np.where(labels[:, 13] < 0, -1, 1 - labels[:, 13])
+
+                    # After a left-right mirror the landmark pairs must swap slots,
+                    # otherwise their order would be wrong
+                    left_top = np.copy(labels[:, [5, 6]])
+                    left_bottom = np.copy(labels[:, [9, 10]])
+                    labels[:, [5, 6]] = labels[:, [7, 8]]
+                    labels[:, [7, 8]] = left_top
+                    labels[:, [9, 10]] = labels[:, [11, 12]]
+                    labels[:, [11, 12]] = left_bottom
+
+                    # eye_left = np.copy(labels[:, [5, 6]])
+                    # mouth_left = np.copy(labels[:, [11, 12]])
+                    # labels[:, [5, 6]] = labels[:, [7, 8]]
+                    # labels[:, [7, 8]] = eye_left
+                    # labels[:, [11, 12]] = labels[:, [13, 14]]
+                    # labels[:, [13, 14]] = mouth_left
+
+        labels_out = torch.zeros((nL, 14))
+        if nL:
+            labels_out[:, 1:] = torch.from_numpy(labels)
+            # showlabels(img, labels[:, 1:5], labels[:, 5:13])
+
+        # Convert
+        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
+        img = np.ascontiguousarray(img)
+        # print(index, ' --- labels_out: ', labels_out)
+        # if nL:
+        #     print(' : landmarks : ', torch.max(labels_out[:, 5:13]), ' --- ', torch.min(labels_out[:, 5:13]))
+        return torch.from_numpy(img), 
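
# [Editor's worked example, not from the commit] The mask arithmetic above maps
# missing landmark coordinates (<= 0) to -1 while scaling and padding visible ones.
import numpy as np

x = np.array([0.25, -1.0, 0.75])               # normalized xs; -1 = landmark not visible
mask = np.array(x > 0, dtype=np.int32)         # 1 where visible, 0 where missing
w, pad = 640, 16
print(mask * (w * x + pad) + (mask - 1))       # -> [176. -1. 496.]
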
labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + +def showlabels(img, boxs, landmarks): + for box in boxs: + x,y,w,h = box[0] * img.shape[1], box[1] * img.shape[0], box[2] * img.shape[1], box[3] * img.shape[0] + #cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0), 2) + cv2.rectangle(img, (int(x - w/2), int(y - h/2)), (int(x + w/2), int(y + h/2)), (0, 255, 0), 2) + + for landmark in landmarks: + #cv2.circle(img,(60,60),30,(0,0,255)) + for i in range(4): + cv2.circle(img, (int(landmark[2*i] * img.shape[1]), int(landmark[2*i+1]*img.shape[0])), 3 ,(0,0,255), -1) + cv2.imshow('test', img) + cv2.waitKey(0) + + +def load_mosaic_face(self, index): + # loads images in a mosaic + labels4 = [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + x = self.labels[index] + labels = x.copy() + if x.size > 0: # Normalized xywh to pixel xyxy format + #box, x1,y1,x2,y2 + labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw + labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh + labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw + labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh + #10 landmarks + + labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (w * x[:, 5] + padw) + (np.array(x[:, 5] > 0, dtype=np.int32) - 1) + labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (h * x[:, 6] + padh) + (np.array(x[:, 6] > 0, dtype=np.int32) - 1) + labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (w * x[:, 7] + padw) + (np.array(x[:, 7] > 0, dtype=np.int32) - 1) + labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (h * x[:, 8] + padh) + (np.array(x[:, 8] > 0, dtype=np.int32) - 1) + labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (w * x[:, 9] + padw) + (np.array(x[:, 9] > 0, dtype=np.int32) - 1) + labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (h * x[:, 10] + padh) + (np.array(x[:, 10] > 0, dtype=np.int32) - 1) + labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (w * x[:, 11] + padw) + (np.array(x[:, 11] > 0, dtype=np.int32) - 1) + labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (h * x[:, 12] + padh) + (np.array(x[:, 12] > 0, 
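
# [Editor's worked example, not from the commit] The horizontal-flip branch in
# __getitem__ above mirrors every visible landmark x and then swaps the left/right
# corner slots; a symmetric plate therefore maps back onto itself.
import numpy as np

lm = np.array([[0.3, 0.4, 0.7, 0.4, 0.7, 0.6, 0.3, 0.6]])   # tl, tr, br, bl corners
lm[:, [0, 2, 4, 6]] = 1 - lm[:, [0, 2, 4, 6]]               # mirror xs (all visible here)
lm[:, [0, 1, 2, 3]] = lm[:, [2, 3, 0, 1]]                   # swap top-left <-> top-right
lm[:, [4, 5, 6, 7]] = lm[:, [6, 7, 4, 5]]                   # swap bottom-right <-> bottom-left
print(lm)                                                   # -> unchanged for this symmetric plate
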
dtype=np.int32) - 1) + # labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (w * x[:, 13] + padw) + (np.array(x[:, 13] > 0, dtype=np.int32) - 1) + # labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (h * x[:, 14] + padh) + (np.array(x[:, 14] > 0, dtype=np.int32) - 1) + labels4.append(labels) + + # Concat/clip labels + if len(labels4): + labels4 = np.concatenate(labels4, 0) + np.clip(labels4[:, 1:5], 0, 2 * s, out=labels4[:, 1:5]) # use with random_perspective + # img4, labels4 = replicate(img4, labels4) # replicate + + #landmarks + labels4[:, 5:] = np.where(labels4[:, 5:] < 0, -1, labels4[:, 5:]) + labels4[:, 5:] = np.where(labels4[:, 5:] > 2 * s, -1, labels4[:, 5:]) + + labels4[:, 5] = np.where(labels4[:, 6] == -1, -1, labels4[:, 5]) + labels4[:, 6] = np.where(labels4[:, 5] == -1, -1, labels4[:, 6]) + + labels4[:, 7] = np.where(labels4[:, 8] == -1, -1, labels4[:, 7]) + labels4[:, 8] = np.where(labels4[:, 7] == -1, -1, labels4[:, 8]) + + labels4[:, 9] = np.where(labels4[:, 10] == -1, -1, labels4[:, 9]) + labels4[:, 10] = np.where(labels4[:, 9] == -1, -1, labels4[:, 10]) + + labels4[:, 11] = np.where(labels4[:, 12] == -1, -1, labels4[:, 11]) + labels4[:, 12] = np.where(labels4[:, 11] == -1, -1, labels4[:, 12]) + + # labels4[:, 13] = np.where(labels4[:, 14] == -1, -1, labels4[:, 13]) + # labels4[:, 14] = np.where(labels4[:, 13] == -1, -1, labels4[:, 14]) + + # Augment + img4, labels4 = random_perspective(img4, labels4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + return img4, labels4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + # Histogram equalization + # if random.random() < 0.2: + # for i in range(3): + # img[:, :, i] = cv2.equalizeHist(img[:, :, i]) + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 
0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True): + # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = 
cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + # warp points + #xy = np.ones((n * 4, 3)) + xy = np.ones((n * 8, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2, 5, 6, 7, 8, 9, 10, 11, 12]].reshape(n * 8, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + if perspective: + xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 16) # rescale + else: # affine + xy = xy[:, :2].reshape(n, 16) + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + + landmarks = xy[:, [8, 9, 10, 11, 12, 13, 14,15]] + mask = np.array(targets[:, 5:] > 0, dtype=np.int32) + landmarks = landmarks * mask + landmarks = landmarks + mask - 1 + + landmarks = np.where(landmarks < 0, -1, landmarks) + landmarks[:, [0, 2, 4, 6]] = np.where(landmarks[:, [0, 2, 4, 6]] > width, -1, landmarks[:, [0, 2, 4, 6]]) + landmarks[:, [1, 3, 5, 7]] = np.where(landmarks[:, [1, 3, 5, 7]] > height, -1,landmarks[:, [1, 3, 5, 7]]) + + landmarks[:, 0] = np.where(landmarks[:, 1] == -1, -1, landmarks[:, 0]) + landmarks[:, 1] = np.where(landmarks[:, 0] == -1, -1, landmarks[:, 1]) + + landmarks[:, 2] = np.where(landmarks[:, 3] == -1, -1, landmarks[:, 2]) + landmarks[:, 3] = np.where(landmarks[:, 2] == -1, -1, landmarks[:, 3]) + + landmarks[:, 4] = np.where(landmarks[:, 5] == -1, -1, landmarks[:, 4]) + landmarks[:, 5] = np.where(landmarks[:, 4] == -1, -1, landmarks[:, 5]) + + landmarks[:, 6] = np.where(landmarks[:, 7] == -1, -1, landmarks[:, 6]) + landmarks[:, 7] = np.where(landmarks[:, 6] == -1, -1, landmarks[:, 7]) + + # landmarks[:, 8] = np.where(landmarks[:, 9] == -1, -1, landmarks[:, 8]) + # landmarks[:, 9] = np.where(landmarks[:, 8] == -1, -1, landmarks[:, 9]) + + targets[:,5:] = landmarks + + xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # # apply angle-based reduction of bounding boxes + # radians = a * math.pi / 180 + # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 + # x = (xy[:, 2] + xy[:, 0]) / 2 + # y = (xy[:, 3] + xy[:, 1]) / 2 + # w = (xy[:, 2] - xy[:, 0]) * reduction + # h = (xy[:, 3] - xy[:, 1]) * reduction + # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T + + # clip boxes + xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) + xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) + targets = targets[i] + targets[:, 1:5] = xy[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
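
# [Editor's worked example, not from the commit] This mirrors the math of
# bbox_ioa() defined here: intersection divided by box2's own area, used to drop
# labels that a cutout mask mostly covers.
import numpy as np

box1 = np.array([0., 0., 50., 50.])                       # cutout mask, x1y1x2y2
b = np.array([[25., 25., 75., 75.], [60., 60., 80., 80.]]).transpose()
inter = (np.minimum(box1[2], b[2]) - np.maximum(box1[0], b[0])).clip(0) * \
        (np.minimum(box1[3], b[3]) - np.maximum(box1[1], b[1])).clip(0)
print(inter / ((b[2] - b[0]) * (b[3] - b[1]) + 1e-16))    # -> [0.25 0.]; labels kept if < 0.60
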
boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int_) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + # Arguments + path: Path to images directory + weights: Train, val, test weights (list) + """ + path = Path(path) # images dir + files = list(path.rglob('*.*')) + n = len(files) # number of files + indices = 
random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
+    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
+    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing
+    for i, img in tqdm(zip(indices, files), total=n):
+        if img.suffix[1:] in img_formats:
+            with open(path / txt[i], 'a') as f:
+                f.write(str(img) + '\n')  # add image to txt file
diff --git a/utils/general.py b/utils/general.py
new file mode 100644
index 0000000..3dfb167
--- /dev/null
+++ b/utils/general.py
@@ -0,0 +1,647 @@
+# General utils
+
+import glob
+import logging
+import math
+import os
+import random
+import re
+import subprocess
+import time
+from pathlib import Path
+
+import cv2
+import numpy as np
+import torch
+import torchvision
+import yaml
+
+from utils.google_utils import gsutil_getsize
+from utils.metrics import fitness
+from utils.torch_utils import init_torch_seeds
+
+# Settings
+torch.set_printoptions(linewidth=320, precision=5, profile='long')
+np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
+cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
+os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads
+
+
+def set_logging(rank=-1):
+    logging.basicConfig(
+        format="%(message)s",
+        level=logging.INFO if rank in [-1, 0] else logging.WARN)
+
+
+def init_seeds(seed=0):
+    # Initialize random number generator (RNG) seeds
+    random.seed(seed)
+    np.random.seed(seed)
+    init_torch_seeds(seed)
+
+
+def get_latest_run(search_dir='.'):
+    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
+    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
+    return max(last_list, key=os.path.getctime) if last_list else ''
+
+
+def check_online():
+    # Check internet connectivity
+    import socket
+    try:
+        socket.create_connection(("1.1.1.1", 53))  # check host accessibility
+        return True
+    except OSError:
+        return False
+
+
+def check_git_status():
+    # Recommend 'git pull' if code is out of date
+    print(colorstr('github: '), end='')
+    try:
+        assert Path('.git').exists(), 'skipping check (not a git repository)'
+        assert not Path('/workspace').exists(), 'skipping check (Docker image)'  # not Path('/.dockerenv').exists()
+        assert check_online(), 'skipping check (offline)'
+
+        cmd = 'git fetch && git config --get remote.origin.url'  # github repo url
+        url = subprocess.check_output(cmd, shell=True).decode()[:-1]
+        cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count'  # commits behind
+        n = int(subprocess.check_output(cmd, shell=True))
+        if n > 0:
+            print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commit'}. 
" + f"Use 'git pull' to update or 'git clone {url}' to download latest.") + else: + print(f'up to date with {url} ✅') + except Exception as e: + print(e) + + +def check_requirements(file='requirements.txt'): + # Check installed dependencies meet requirements + import pkg_resources + requirements = pkg_resources.parse_requirements(Path(file).open()) + requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements] + pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_file(file): + # Search for file if not found + if os.path.isfile(file) or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), 'File Not Found: %s' % file # assert file was found + assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' % s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int_) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int_), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # 
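
# [Editor's usage example, not from the commit] xyxy2xywh() and xywh2xyxy() here
# are exact inverses, so a round trip returns the original corners.
import numpy as np

boxes = np.array([[10., 20., 50., 80.]])        # x1, y1, x2, y2
xywh = xyxy2xywh(boxes)                         # -> [[30., 50., 40., 60.]] centre + size
assert np.allclose(xywh2xyxy(xywh), boxes)
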
bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + # convex (smallest enclosing box) width + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * \ + torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / ((1 + eps) - iou + v) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. 
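# Added worked example (illustrative): two 10x10 boxes offset by (5, 5) share
# a 5x5 patch, so IoU = 25 / (100 + 100 - 25) ~= 0.143; CIoU=True subtracts the
# centre-distance and aspect-ratio penalties from that value.
_a = torch.tensor([0., 0., 10., 10.])
_b = torch.tensor([[5., 5., 15., 15.]])
print(bbox_iou(_a, _b))             # tensor([0.1429])
print(bbox_iou(_a, _b, CIoU=True))  # smaller than plain IoU here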
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - + torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + # iou = inter / (area1 + area2 - inter) + return inter / (area1[:, None] + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + # iou = inter / (area1 + area2 - inter) + return inter / (wh1.prod(2) + wh2.prod(2) - inter) + +def jaccard_diou(box_a, box_b, iscrowd:bool=False): + use_batch = True + if box_a.dim() == 2: + use_batch = False + box_a = box_a[None, ...] + box_b = box_b[None, ...] + + inter = intersect(box_a, box_b) + area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) * + (box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B] + area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) * + (box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B] + union = area_a + area_b - inter + x1 = ((box_a[:, :, 2]+box_a[:, :, 0]) / 2).unsqueeze(2).expand_as(inter) + y1 = ((box_a[:, :, 3]+box_a[:, :, 1]) / 2).unsqueeze(2).expand_as(inter) + x2 = ((box_b[:, :, 2]+box_b[:, :, 0]) / 2).unsqueeze(1).expand_as(inter) + y2 = ((box_b[:, :, 3]+box_b[:, :, 1]) / 2).unsqueeze(1).expand_as(inter) + + t1 = box_a[:, :, 1].unsqueeze(2).expand_as(inter) + b1 = box_a[:, :, 3].unsqueeze(2).expand_as(inter) + l1 = box_a[:, :, 0].unsqueeze(2).expand_as(inter) + r1 = box_a[:, :, 2].unsqueeze(2).expand_as(inter) + + t2 = box_b[:, :, 1].unsqueeze(1).expand_as(inter) + b2 = box_b[:, :, 3].unsqueeze(1).expand_as(inter) + l2 = box_b[:, :, 0].unsqueeze(1).expand_as(inter) + r2 = box_b[:, :, 2].unsqueeze(1).expand_as(inter) + + cr = torch.max(r1, r2) + cl = torch.min(l1, l2) + ct = torch.min(t1, t2) + cb = torch.max(b1, b2) + D = (((x2 - x1)**2 + (y2 - y1)**2) / ((cr-cl)**2 + (cb-ct)**2 + 1e-7)) + out = inter / area_a if iscrowd else inter / (union + 1e-7) - D ** 0.7 + return out if use_batch else out.squeeze(0) + + +def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()): + """Performs Non-Maximum Suppression (NMS) on inference results + Returns: + detections with shape: nx6 (x1, y1, x2, y2, conf, cls) + """ + + nc = prediction.shape[2] - 13 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) + multi_label=False + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 14), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 13), 
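# Added shape note (illustrative): unlike bbox_iou(), box_iou() is pairwise —
# [N, 4] and [M, 4] inputs yield the full [N, M] IoU matrix. Note also that
# jaccard_diou() above calls an intersect() helper that is not defined in this
# file, so it assumes that utility is provided elsewhere.
_b1 = torch.tensor([[0., 0., 10., 10.], [0., 0., 4., 4.]])
_b2 = torch.tensor([[5., 5., 15., 15.], [20., 20., 30., 30.], [0., 0., 10., 10.]])
print(box_iou(_b1, _b2).shape)  # torch.Size([2, 3])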
device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 13] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 13:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, landmarks, cls) + if multi_label: + i, j = (x[:, 13:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 13, None], x[i, 5:13] ,j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 13:].max(1, keepdim=True) + x = torch.cat((box, conf, x[:, 5:13], j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # If none remain process next image + n = x.shape[0] # number of boxes + if not n: + continue + + # Batched NMS + c = x[:, 13:14] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + #if i.shape[0] > max_det: # limit detections + # i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + break # time limit exceeded + + return output + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()): + """Performs Non-Maximum Suppression (NMS) on inference results + + Returns: + detections with shape: nx6 (x1, y1, x2, y2, conf, cls) + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + # (pixels) minimum and maximum box width and height + min_wh, max_wh = 2, 4096 + #max_det = 300 # maximum number of detections per image + #max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best 
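# Added layout note (illustrative; `pred` would be raw model output): each row
# kept by non_max_suppression_face() is
# [x1, y1, x2, y2, conf, l0x, l0y, l1x, l1y, l2x, l2y, l3x, l3y, cls],
# i.e. box corners, fused confidence, four plate-corner landmarks, class id.
# det = non_max_suppression_face(pred, conf_thres=0.3, iou_thres=0.5)[0]
# boxes, landmarks, cls = det[:, :4], det[:, 5:13], det[:, 13]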
class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[ + conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + #elif n > max_nms: # excess boxes + # x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + x = x[x[:, 4].argsort(descending=True)] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + #if i.shape[0] > max_det: # limit detections + # i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + for key in 'optimizer', 'training_results', 'wandb_id': + x[key] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' 
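# Added usage note (illustrative; the checkpoint path is hypothetical):
# stripping the optimizer state and casting to FP16 roughly halves the file
# size of a finished checkpoint before it is shipped for inference.
# strip_optimizer('runs/train/exp/weights/best.pt')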
% url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + # BGR to RGB, to 3x416x416 + im = im[:, :, ::-1].transpose(2, 0, 1) + im = np.ascontiguousarray( + im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device) + ).argmax(1) # classifier prediction + # retain matching class detections + x[i] = x[i][pred_cls1 == pred_cls2] + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
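# Added usage sketch (classifier, img and im0 are hypothetical here): run a
# second-stage classifier over the NMS output and keep only detections whose
# class the classifier confirms.
# pred = non_max_suppression(pred, 0.25, 0.45)
# pred = apply_classifier(pred, classifier, img, im0)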
+ path = Path(path) # os-agnostic + if (path.exists() and exist_ok) or (not path.exists()): + return str(path) + else: + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + return f"{path}{sep}{n}" # update path diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..ac29d10 --- /dev/null +++ b/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/utils/google_utils.py b/utils/google_utils.py new file mode 100644 index 0000000..024dc78 --- /dev/null +++ b/utils/google_utils.py @@ -0,0 +1,122 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='ultralytics/yolov5'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] + tag = response['tag_name'] # i.e. 'v1.0' + except: # fallback plan + assets = ['yolov5.pt', 'yolov5.pt', 'yolov5l.pt', 'yolov5x.pt'] + tag = subprocess.check_output('git tag', shell=True).decode('utf-8').split('\n')[-2] + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
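# Added usage note (illustrative): attempt_download() fetches a named release
# asset only when the file is missing locally. Note the hard-coded fallback
# asset list above repeats 'yolov5.pt' where 'yolov5s.pt'/'yolov5m.pt' were
# presumably intended.
# attempt_download('yolov5s.pt')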
', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/infer_utils.py b/utils/infer_utils.py new file mode 100644 index 0000000..9dc428c --- /dev/null +++ b/utils/infer_utils.py @@ -0,0 +1,36 @@ +import torch + + + +def decode_infer(output, stride): + # logging.info(torch.tensor(output.shape[0])) + # logging.info(output.shape) + # # bz is batch-size + # bz = tuple(torch.tensor(output.shape[0])) + # gridsize = tuple(torch.tensor(output.shape[-1])) + # logging.info(gridsize) + sh = torch.tensor(output.shape) + bz = sh[0] + gridsize = sh[-1] + + output = output.permute(0, 2, 3, 1) + output = output.view(bz, gridsize, gridsize, self.gt_per_grid, 5+self.numclass) + x1y1, x2y2, conf, prob = torch.split( + output, [2, 2, 1, self.numclass], dim=4) + + shiftx = torch.arange(0, gridsize, dtype=torch.float32) + shifty = torch.arange(0, gridsize, dtype=torch.float32) + shifty, shiftx = torch.meshgrid([shiftx, shifty]) + shiftx = shiftx.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid) + shifty = shifty.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid) + + xy_grid = torch.stack([shiftx, shifty], dim=4).cuda() + x1y1 = (xy_grid+0.5-torch.exp(x1y1))*stride + x2y2 = (xy_grid+0.5+torch.exp(x2y2))*stride + + xyxy = torch.cat((x1y1, x2y2), dim=4) + conf = torch.sigmoid(conf) + prob = torch.sigmoid(prob) + output = torch.cat((xyxy, conf, prob), 4) + output = output.view(bz, -1, 5+self.numclass) + return output \ No newline at end of file diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000..661405a --- 
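# Added note: decode_infer() above is a module-level function but reads
# self.gt_per_grid and self.numclass, so it raises NameError if called as
# written. A standalone variant (sketch, assuming those values are known to
# the caller) would take them as parameters instead:
# def decode_infer(output, stride, gt_per_grid, numclass): ...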
/dev/null +++ b/utils/loss.py @@ -0,0 +1,305 @@ +# Loss functions + +import torch +import torch.nn as nn +import numpy as np +from utils.general import bbox_iou +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
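# Added usage sketch (mirrors the wrapping comment above): wrap the plain BCE
# criterion once, then call the wrapper exactly like the original loss.
_bce = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([1.0]))
_criterion = FocalLoss(_bce, gamma=1.5)
# loss = _criterion(pred_logits, targets)  # pred_logits/targets are hypothetical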
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(QFocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + +class WingLoss(nn.Module): + def __init__(self, w=10, e=2): + super(WingLoss, self).__init__() + # https://arxiv.org/pdf/1711.06753v4.pdf Figure 5 + self.w = w + self.e = e + self.C = self.w - self.w * np.log(1 + self.w / self.e) + + def forward(self, x, t, sigma=1): + weight = torch.ones_like(t) + weight[torch.where(t==-1)] = 0 + diff = weight * (x - t) + abs_diff = diff.abs() + flag = (abs_diff.data < self.w).float() + y = flag * self.w * torch.log(1 + abs_diff / self.e) + (1 - flag) * (abs_diff - self.C) + return y.sum() + +class LandmarksLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=1.0): + super(LandmarksLoss, self).__init__() + self.loss_fcn = WingLoss()#nn.SmoothL1Loss(reduction='sum') + self.alpha = alpha + + def forward(self, pred, truel, mask): + loss = self.loss_fcn(pred*mask, truel*mask) + return loss / (torch.sum(mask) + 10e-14) + + +def compute_loss(p, targets, model): # predictions, targets, model + device = targets.device + lcls, lbox, lobj, lmark = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors, tlandmarks, lmks_mask = build_targets(p, targets, model) # targets + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) # weight=model.class_weights) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + landmarks_loss = LandmarksLoss(1.0) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + cp, cn = smooth_BCE(eps=0.0) + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + # Losses + nt = 0 # number of targets + no = len(p) # number of outputs + balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6 + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + nt += n # cumulative targets + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2. 
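# Added worked example (illustrative): WingLoss is logarithmic for residuals
# below w and linear beyond; with the defaults w=10, e=2, a residual of 0.5
# gives 10 * ln(1 + 0.5 / 2) ~= 2.2314, and targets equal to -1 are masked out.
print(WingLoss()(torch.tensor([0.5]), torch.tensor([0.0])))  # tensor(2.2314)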
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + if model.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 13:], cn, device=device) # targets + t[range(n), tcls[i]] = cp + lcls += BCEcls(ps[:, 13:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + #landmarks loss + #plandmarks = ps[:,5:13].sigmoid() * 8. - 4. + plandmarks = ps[:,5:13] + + plandmarks[:, 0:2] = plandmarks[:, 0:2] * anchors[i] + plandmarks[:, 2:4] = plandmarks[:, 2:4] * anchors[i] + plandmarks[:, 4:6] = plandmarks[:, 4:6] * anchors[i] + plandmarks[:, 6:8] = plandmarks[:, 6:8] * anchors[i] + # plandmarks[:, 8:10] = plandmarks[:,8:10] * anchors[i] + + lmark += landmarks_loss(plandmarks, tlandmarks[i], lmks_mask[i]) + + + lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss + + s = 3 / no # output count scaling + lbox *= h['box'] * s + lobj *= h['obj'] * s * (1.4 if no == 4 else 1.) + lcls *= h['cls'] * s + lmark *= h['landmark'] * s + + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + lmark + return loss * bs, torch.cat((lbox, lobj, lcls, lmark, loss)).detach() + + +def build_targets(p, targets, model): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + na, nt = det.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, landmarks, lmks_mask = [], [], [], [], [], [] + #gain = torch.ones(7, device=targets.device) # normalized to gridspace gain + gain = torch.ones(15, device=targets.device) + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(det.nl): + anchors, shape = det.anchors[i], p[i].shape + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + #landmarks 10 + gain[6:14] = torch.tensor(p[i].shape)[[3, 2, 3, 2, 3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
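# Added decode note (illustrative): with raw logits of 0 the xy branch above
# yields sigmoid(0) * 2 - 0.5 = 0.5 (the cell centre) and the wh branch yields
# (sigmoid(0) * 2) ** 2 = 1.0 anchor unit; overall xy spans (-0.5, 1.5) grid
# cells and wh spans (0, 4) anchor units, the standard YOLOv5 parameterisation.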
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 14].long() # anchor indices + #indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + #landmarks + lks = t[:,6:14] + #lks_mask = lks > 0 + #lks_mask = lks_mask.float() + lks_mask = torch.where(lks < 0, torch.full_like(lks, 0.), torch.full_like(lks, 1.0)) + + # The landmark coordinates should be divided by the anchor width/height so they are easier for the model to learn; using gwh would encode each landmark differently, with no unified reference scale + + lks[:, [0, 1]] = (lks[:, [0, 1]] - gij) + lks[:, [2, 3]] = (lks[:, [2, 3]] - gij) + lks[:, [4, 5]] = (lks[:, [4, 5]] - gij) + lks[:, [6, 7]] = (lks[:, [6, 7]] - gij) + # lks[:, [8, 9]] = (lks[:, [8, 9]] - gij) + + ''' + #anch_w = torch.ones(5, device=targets.device).fill_(anchors[0][0]) + #anch_wh = torch.ones(5, device=targets.device) + anch_f_0 = (a == 0).unsqueeze(1).repeat(1, 5) + anch_f_1 = (a == 1).unsqueeze(1).repeat(1, 5) + anch_f_2 = (a == 2).unsqueeze(1).repeat(1, 5) + lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_0, lks[:, [0, 2, 4, 6, 8]] / anchors[0][0], lks[:, [0, 2, 4, 6, 8]]) + lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_1, lks[:, [0, 2, 4, 6, 8]] / anchors[1][0], lks[:, [0, 2, 4, 6, 8]]) + lks[:, [0, 2, 4, 6, 8]] = torch.where(anch_f_2, lks[:, [0, 2, 4, 6, 8]] / anchors[2][0], lks[:, [0, 2, 4, 6, 8]]) + + lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_0, lks[:, [1, 3, 5, 7, 9]] / anchors[0][1], lks[:, [1, 3, 5, 7, 9]]) + lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_1, lks[:, [1, 3, 5, 7, 9]] / anchors[1][1], lks[:, [1, 3, 5, 7, 9]]) + lks[:, [1, 3, 5, 7, 9]] = torch.where(anch_f_2, lks[:, [1, 3, 5, 7, 9]] / anchors[2][1], lks[:, [1, 3, 5, 7, 9]]) + + #new_lks = lks[lks_mask>0] + #print('new_lks: min --- ', torch.min(new_lks), ' max --- ', torch.max(new_lks)) + + lks_mask_1 = torch.where(lks < -3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0)) + lks_mask_2 = torch.where(lks > 3, torch.full_like(lks, 0.), torch.full_like(lks, 1.0)) + + lks_mask_new = lks_mask * lks_mask_1 * lks_mask_2 + lks_mask_new[:, 0] = lks_mask_new[:, 0] * lks_mask_new[:, 1] + lks_mask_new[:, 1] = lks_mask_new[:, 0] * lks_mask_new[:, 1] + lks_mask_new[:, 2] = lks_mask_new[:, 2] * lks_mask_new[:, 3] + lks_mask_new[:, 3] = lks_mask_new[:, 2] * lks_mask_new[:, 3] + lks_mask_new[:, 4] = lks_mask_new[:, 4] * lks_mask_new[:, 5] + lks_mask_new[:, 5] = lks_mask_new[:, 4] * lks_mask_new[:, 5] + lks_mask_new[:, 6] = lks_mask_new[:, 6] * lks_mask_new[:, 7] + lks_mask_new[:, 7] = lks_mask_new[:, 6] * lks_mask_new[:, 7] + lks_mask_new[:, 8] = lks_mask_new[:, 8] * lks_mask_new[:, 9] + lks_mask_new[:, 9] = lks_mask_new[:, 8] * lks_mask_new[:, 9] + ''' + lks_mask_new = lks_mask + lmks_mask.append(lks_mask_new) + landmarks.append(lks) + #print('lks: ', lks.size()) + + return tcls, tbox, indices, anch, landmarks, lmks_mask diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..99d5bcf --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,200 @@ +# Model validation metrics + +from pathlib import Path + +import
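# Added worked example (illustrative): the shape-based anchor filter in
# build_targets() keeps an anchor/target pair when max(r, 1/r).max() <
# hyp['anchor_t'] (typically 4.0); e.g. anchor (10, 13) vs target wh (30, 26)
# gives ratios (3.0, 2.0), max 3.0 < 4.0, so the pair is kept.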
matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898 + s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95) + ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and (j == 0): + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 score (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + + if plot: + plot_pr_curve(px, py, ap, save_dir, names) + + return p, r, ap, f1, unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + 
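# Added worked example (illustrative): fitness() scores rows of
# [P, R, mAP@.5, mAP@.5:.95] with weights [0, 0, 0.1, 0.9], so
# [0.9, 0.8, 0.7, 0.5] scores 0.1 * 0.7 + 0.9 * 0.5 = 0.52.
print(fitness(np.array([[0.9, 0.8, 0.7, 0.5]])))  # [0.52]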
self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + else: + self.matrix[gc, self.nc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[self.nc, dc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FN'] if labels else "auto", + yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='.', names=()): + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # show mAP in legend if < 10 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250) diff --git a/utils/plots.py b/utils/plots.py new file mode 100644 index 0000000..0c008f1 --- /dev/null +++ b/utils/plots.py @@ -0,0 +1,413 @@ +# Plotting 
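# Added usage sketch (tensors are hypothetical): detections are Nx6
# (x1, y1, x2, y2, conf, cls), labels are Mx5 (cls, x1, y1, x2, y2). Note that
# the self.matrix array set in __init__ shadows the matrix() accessor above.
# cm = ConfusionMatrix(nc=2)
# cm.process_batch(detections, labels)
# cm.plot(save_dir='runs/val', names=['plate', 'vehicle'])  # names illustrative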
utils + +import glob +import math +import os +import random +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import yaml +from PIL import Image, ImageDraw +from scipy.signal import butter, filtfilt + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import fitness + +# Settings +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +def color_list(): + # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb + def hex2rgb(h): + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']] + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def plot_one_box(x, img, color=None, label=None, line_thickness=None): + # Plots one bounding box on image img + tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if isinstance(images, 
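# Added usage sketch (image path and label are illustrative): draw one
# labelled box in place on a BGR image.
_img = cv2.imread('imgs/test.jpg')  # hypothetical input image
if _img is not None:
    plot_one_box([50, 60, 220, 140], _img, color=(0, 255, 0), label='plate 0.92')
    cv2.imwrite('result/test_boxed.jpg', _img)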
torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + # colors = color_list() # list of colors + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = xywh2xyxy(image_targets[:, 2:6]).T + classes = image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y + for j, box in enumerate(boxes.T): + cls = int(classes[j]) + # color = colors[cls % len(colors)] + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=None, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid() + ax2.set_yticks(np.arange(30, 60, 5)) + ax2.set_xlim(0, 30) + ax2.set_ylim(29, 51) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig('test_study.png', dpi=300) + + +def plot_labels(labels, save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... 
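# Added usage sketch (assumes utils.general.one_cycle and a dummy optimizer):
# render the cosine one-cycle schedule training would follow over 300 epochs.
# from utils.general import one_cycle
# _opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
# _sched = torch.optim.lr_scheduler.LambdaLR(_opt, lr_lambda=one_cycle(1.0, 0.2, 300))
# plot_lr_scheduler(_opt, _sched, epochs=300, save_dir='runs/train/exp')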
') + c, b = labels[:, 0], labels[:, 1:5].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + colors = color_list() + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + # for cls, *box in labels[:1000]: + # ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with open(yaml_file) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000..2cb09e7 --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,294 @@ +# PyTorch utils + +import logging +import math +import os +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def git_describe(): + # return human-readable git description, i.e. 
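# Added usage sketch (the DDP context and helper are hypothetical): rank 0
# performs one-off work such as caching a dataset while other ranks wait at
# the barrier, then all ranks proceed together.
# with torch_distributed_zero_first(local_rank):
#     dataset = create_dataset(...)  # create_dataset is a hypothetical helper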
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + if Path('.git').exists(): + return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1] + else: + return '' + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + n = torch.cuda.device_count() + if n > 1 and batch_size: # check that batch_size is compatible with device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(device.split(',') if device else range(n)): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s) # skip a line + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] 
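        # Timing bookkeeping for the loop below: t[0]->t[1] brackets the
        # forward pass and t[1]->t[2] the backward pass; dtf and dtb
        # accumulate the average per-iteration time of each phase in ms.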
# dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. 
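`fuse_conv_and_bn` above folds the BatchNorm affine transform into the convolution (fused weight = w_bn @ w_conv, fused bias = w_bn @ b_conv + b_bn), which is only valid with inference-mode running statistics. A minimal self-check sketch under that assumption (layer sizes here are illustrative, not from this repo):

```python
import torch
import torch.nn as nn

from utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(16)
bn.running_mean.uniform_(-1, 1)   # fake non-trivial running statistics
bn.running_var.uniform_(0.5, 2.0)
bn.eval()                         # fusion bakes in the running stats
fused = fuse_conv_and_bn(conv, bn)

x = torch.randn(1, 8, 32, 32)
with torch.no_grad():
    err = (bn(conv(x)) - fused(x)).abs().max().item()
print(f'max abs fusion error: {err:.2e}')  # ~1e-6, i.e. float rounding noise
```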
img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = int(model.stride.max()) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
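    Concretely, each update() call applies ema = d * ema + (1 - d) * model to every
    floating-point entry of the state_dict, where d = decay * (1 - exp(-updates / 2000))
    ramps from 0 toward `decay`, so early updates follow the raw weights closely and
    the average stiffens as training progresses.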
+ """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py new file mode 100644 index 0000000..d7a521f --- /dev/null +++ b/utils/wandb_logging/log_dataset.py @@ -0,0 +1,24 @@ +import argparse + +import yaml + +from wandb_utils import WandbLogger + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py new file mode 100644 index 0000000..d8f50ae --- /dev/null +++ b/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,306 @@ +import json +import sys +from pathlib import Path + +import torch +import yaml +from tqdm import tqdm + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset + +try: + import wandb + from wandb import init, finish +except ImportError: + wandb = None + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + if isinstance(opt.resume, str): + if 
opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + else: + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict + + def 
setup_training(self, opt, data_dict): + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict + + def download_dataset_artifact(self, path, alias): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' 
% (total_epochs)
+            return modeldir, model_artifact
+        return None, None
+
+    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
+            'original_url': str(path),
+            'epochs_trained': epoch + 1,
+            'save period': opt.save_period,
+            'project': opt.project,
+            'total_epochs': opt.epochs,
+            'fitness_score': fitness_score
+        })
+        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
+        wandb.log_artifact(model_artifact,
+                           aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
+        print("Saving model artifact on epoch ", epoch + 1)
+
+    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
+        with open(data_file) as f:
+            data = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
+        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
+        names = {k: v for k, v in enumerate(names)}  # to index dictionary
+        self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
+            data['train']), names, name='train') if data.get('train') else None
+        self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
+            data['val']), names, name='val') if data.get('val') else None
+        if data.get('train'):
+            data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
+        if data.get('val'):
+            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
+        path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1))  # updated data.yaml path
+        data.pop('download', None)
+        with open(path, 'w') as f:
+            yaml.dump(data, f)
+
+        if self.job_type == 'Training':  # builds correct artifact pipeline graph
+            self.wandb_run.use_artifact(self.val_artifact)
+            self.wandb_run.use_artifact(self.train_artifact)
+            self.val_artifact.wait()
+            self.val_table = self.val_artifact.get('val')
+            self.map_val_table_path()
+        else:
+            self.wandb_run.log_artifact(self.train_artifact)
+            self.wandb_run.log_artifact(self.val_artifact)
+        return path
+
+    def map_val_table_path(self):
+        # Map each validation image name to its row id in the W&B val table,
+        # so per-epoch predictions can later be joined back to their rows.
+        self.val_table_map = {}
+        print("Mapping dataset")
+        for i, data in enumerate(tqdm(self.val_table.data)):
+            self.val_table_map[data[3]] = data[0]
+
+    def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+        # TODO: explore multiprocessing to split this loop and run it in parallel; essential for speeding up logging
+        artifact = wandb.Artifact(name=name, type="dataset")
+        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
+        img_files = tqdm(dataset.img_files) if not img_files else img_files
+        for img_file in img_files:
+            if Path(img_file).is_dir():
+                artifact.add_dir(img_file, name='data/images')
+                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
+                artifact.add_dir(labels_path, name='data/labels')
+            else:
+                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
+                label_file = Path(img2label_paths([img_file])[0])
+                artifact.add_file(str(label_file),
+                                  name='data/labels/' + label_file.name) if label_file.exists() else None
+        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
+        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
+        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
+            height, width = shapes[0]
+            labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height])
+            box_data, img_classes = [], {}
+            for cls, *xyxy in labels[:, 1:].tolist():
+                cls = int(cls)
+                box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                                 "class_id": cls,
+                                 "box_caption": "%s" % (class_to_id[cls]),
+                                 "scores": {"acc": 1},
+                                 "domain": "pixel"})
+                img_classes[cls] = class_to_id[cls]
+            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
+            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
+                           Path(paths).name)
+        artifact.add(table, name)
+        return artifact
+
+    def log_training_progress(self, predn, path, names):
+        if self.val_table and self.result_table:
+            class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
+            box_data = []
+            total_conf = 0
+            for *xyxy, conf, cls in predn.tolist():
+                if conf >= 0.25:
+                    box_data.append(
+                        {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                         "class_id": int(cls),
+                         "box_caption": "%s %.3f" % (names[cls], conf),
+                         "scores": {"class_score": conf},
+                         "domain": "pixel"})
+                    total_conf = total_conf + conf
+            boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
+            id = self.val_table_map[Path(path).name]
+            self.result_table.add_data(self.current_epoch,
+                                       id,
+                                       wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
+                                       total_conf / max(1, len(box_data))
+                                       )
+
+    def log(self, log_dict):
+        if self.wandb_run:
+            for key, value in log_dict.items():
+                self.log_dict[key] = value
+
+    def end_epoch(self, best_result=False):
+        if self.wandb_run:
+            wandb.log(self.log_dict)
+            self.log_dict = {}
+            if self.result_artifact:
+                train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
+                self.result_artifact.add(train_results, 'result')
+                wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch),
+                                                                  ('best' if best_result else '')])
+                self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
+                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+
+    def finish_run(self):
+        if self.wandb_run:
+            if self.log_dict:
+                wandb.log(self.log_dict)
+            wandb.run.finish()
diff --git a/weights/download_weights.sh b/weights/download_weights.sh
new file mode 100644
index 0000000..8be88a6
--- /dev/null
+++ b/weights/download_weights.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Download latest models from https://github.com/ultralytics/yolov5/releases
+# Usage:
+#    $ bash weights/download_weights.sh
+
+python3 - <<EOF
+from utils.google_utils import attempt_download
+
+for x in ['s', 'm', 'l', 'x']:
+    attempt_download(f'yolov5{x}.pt')
+
+EOF

## Bugs & Problems

Please open an issue.

## Acknowledgements

Some code was borrowed from Sergey Karayev.
diff --git a/widerface_evaluate/box_overlaps.c b/widerface_evaluate/box_overlaps.c
new file mode 100644
index 0000000..9a890a0
--- /dev/null
+++ b/widerface_evaluate/box_overlaps.c
@@ -0,0 +1,7813 @@
+/* Generated by Cython 0.29.21 */
+
+/* BEGIN: Cython Metadata
+{
+  "distutils": {
+    "depends": [
+      "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h",
+      "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h"
+    ],
+    "include_dirs": [
+      "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include"
+    ],
+    "name": "bbox",
+    "sources": [
+      "box_overlaps.pyx"
+    ]
+  },
+  "module_name": "bbox"
+}
+END: Cython Metadata */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_21" +#define CYTHON_HEX_VERSION 0x001D15F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + 
#define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if 
CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + 
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__bbox +#define __PYX_HAVE_API__bbox +/* Early includes */ +#include +#include +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" + + /* NumPy API declarations from "numpy/__init__.pxd" */ + +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) 
llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include + #else + #include + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "box_overlaps.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":697 + * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":698 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":699 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":700 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":704 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":705 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":706 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":707 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":711 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":712 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":721 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":722 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":723 + * ctypedef 
npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":725 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":726 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":727 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":729 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":730 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":732 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":733 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":734 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "box_overlaps.pyx":13 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * def bbox_overlaps( + */ +typedef __pyx_t_5numpy_float_t __pyx_t_4bbox_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":736 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble 
clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":737 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":738 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":740 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { 
PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = 
(LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* BufferIndexError.proto */ +static void __Pyx_RaiseBufferIndexError(int axis); + +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define 
__Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); +#define __Pyx_PyObject_Dict_GetItem(obj, name)\ + (likely(PyDict_CheckExact(obj)) ?\ + __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) +#else +#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, 
value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define 
__Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cython' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'bbox' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_4bbox_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "bbox" +extern int __pyx_module_is_main_bbox; +int __pyx_module_is_main_bbox = 0; + +/* Implementation of 'bbox' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_K[] = "K"; +static const char __pyx_k_N[] = "N"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_n[] = "n"; +static const char __pyx_k_ih[] = "ih"; +static const char __pyx_k_iw[] = "iw"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_ua[] = "ua"; +static const char __pyx_k_bbox[] = "bbox"; +static const char __pyx_k_main[] = 
"__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_boxes[] = "boxes"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_float[] = "float"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_zeros[] = "zeros"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_box_area[] = "box_area"; +static const char __pyx_k_overlaps[] = "overlaps"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_query_boxes[] = "query_boxes"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_bbox_overlaps[] = "bbox_overlaps"; +static const char __pyx_k_box_overlaps_pyx[] = "box_overlaps.pyx"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_n_s_K; +static PyObject *__pyx_n_s_N; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_bbox; +static PyObject *__pyx_n_s_bbox_overlaps; +static PyObject *__pyx_n_s_box_area; +static PyObject *__pyx_kp_s_box_overlaps_pyx; +static PyObject *__pyx_n_s_boxes; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_ih; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_iw; +static PyObject *__pyx_n_s_k; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_n; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_overlaps; +static PyObject *__pyx_n_s_query_boxes; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_ua; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_zeros; +static PyObject *__pyx_pf_4bbox_bbox_overlaps(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes, PyArrayObject *__pyx_v_query_boxes); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_codeobj__7; +/* Late includes */ + +/* "box_overlaps.pyx":15 + * ctypedef 
np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_4bbox_1bbox_overlaps(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_4bbox_bbox_overlaps[] = "\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n "; +static PyMethodDef __pyx_mdef_4bbox_1bbox_overlaps = {"bbox_overlaps", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4bbox_1bbox_overlaps, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4bbox_bbox_overlaps}; +static PyObject *__pyx_pw_4bbox_1bbox_overlaps(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_boxes = 0; + PyArrayObject *__pyx_v_query_boxes = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("bbox_overlaps (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes,&__pyx_n_s_query_boxes,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_query_boxes)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("bbox_overlaps", 1, 2, 2, 1); __PYX_ERR(0, 15, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bbox_overlaps") < 0)) __PYX_ERR(0, 15, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_boxes = ((PyArrayObject *)values[0]); + __pyx_v_query_boxes = ((PyArrayObject *)values[1]); + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("bbox_overlaps", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 15, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("bbox.bbox_overlaps", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes), __pyx_ptype_5numpy_ndarray, 1, "boxes", 0))) __PYX_ERR(0, 16, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_query_boxes), __pyx_ptype_5numpy_ndarray, 1, "query_boxes", 0))) __PYX_ERR(0, 17, __pyx_L1_error) + __pyx_r = __pyx_pf_4bbox_bbox_overlaps(__pyx_self, __pyx_v_boxes, __pyx_v_query_boxes); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4bbox_bbox_overlaps(CYTHON_UNUSED PyObject *__pyx_self, 
PyArrayObject *__pyx_v_boxes, PyArrayObject *__pyx_v_query_boxes) { + unsigned int __pyx_v_N; + unsigned int __pyx_v_K; + PyArrayObject *__pyx_v_overlaps = 0; + __pyx_t_4bbox_DTYPE_t __pyx_v_iw; + __pyx_t_4bbox_DTYPE_t __pyx_v_ih; + __pyx_t_4bbox_DTYPE_t __pyx_v_box_area; + __pyx_t_4bbox_DTYPE_t __pyx_v_ua; + unsigned int __pyx_v_k; + unsigned int __pyx_v_n; + __Pyx_LocalBuf_ND __pyx_pybuffernd_boxes; + __Pyx_Buffer __pyx_pybuffer_boxes; + __Pyx_LocalBuf_ND __pyx_pybuffernd_overlaps; + __Pyx_Buffer __pyx_pybuffer_overlaps; + __Pyx_LocalBuf_ND __pyx_pybuffernd_query_boxes; + __Pyx_Buffer __pyx_pybuffer_query_boxes; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyArrayObject *__pyx_t_5 = NULL; + unsigned int __pyx_t_6; + unsigned int __pyx_t_7; + unsigned int __pyx_t_8; + size_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + size_t __pyx_t_12; + Py_ssize_t __pyx_t_13; + size_t __pyx_t_14; + Py_ssize_t __pyx_t_15; + size_t __pyx_t_16; + Py_ssize_t __pyx_t_17; + unsigned int __pyx_t_18; + unsigned int __pyx_t_19; + unsigned int __pyx_t_20; + __pyx_t_4bbox_DTYPE_t __pyx_t_21; + __pyx_t_4bbox_DTYPE_t __pyx_t_22; + __pyx_t_4bbox_DTYPE_t __pyx_t_23; + __pyx_t_4bbox_DTYPE_t __pyx_t_24; + int __pyx_t_25; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("bbox_overlaps", 0); + __pyx_pybuffer_overlaps.pybuffer.buf = NULL; + __pyx_pybuffer_overlaps.refcount = 0; + __pyx_pybuffernd_overlaps.data = NULL; + __pyx_pybuffernd_overlaps.rcbuffer = &__pyx_pybuffer_overlaps; + __pyx_pybuffer_boxes.pybuffer.buf = NULL; + __pyx_pybuffer_boxes.refcount = 0; + __pyx_pybuffernd_boxes.data = NULL; + __pyx_pybuffernd_boxes.rcbuffer = &__pyx_pybuffer_boxes; + __pyx_pybuffer_query_boxes.pybuffer.buf = NULL; + __pyx_pybuffer_query_boxes.refcount = 0; + __pyx_pybuffernd_query_boxes.data = NULL; + __pyx_pybuffernd_query_boxes.rcbuffer = &__pyx_pybuffer_query_boxes; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 15, __pyx_L1_error) + } + __pyx_pybuffernd_boxes.diminfo[0].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes.diminfo[0].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes.diminfo[1].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes.diminfo[1].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer, (PyObject*)__pyx_v_query_boxes, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 15, __pyx_L1_error) + } + __pyx_pybuffernd_query_boxes.diminfo[0].strides = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_query_boxes.diminfo[0].shape = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_query_boxes.diminfo[1].strides = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_query_boxes.diminfo[1].shape = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.shape[1]; + + /* "box_overlaps.pyx":27 + * overlaps: (N, K) ndarray of 
overlap between boxes and query_boxes + * """ + * cdef unsigned int N = boxes.shape[0] # <<<<<<<<<<<<<< + * cdef unsigned int K = query_boxes.shape[0] + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) + */ + __pyx_v_N = (__pyx_v_boxes->dimensions[0]); + + /* "box_overlaps.pyx":28 + * """ + * cdef unsigned int N = boxes.shape[0] + * cdef unsigned int K = query_boxes.shape[0] # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) + * cdef DTYPE_t iw, ih, box_area + */ + __pyx_v_K = (__pyx_v_query_boxes->dimensions[0]); + + /* "box_overlaps.pyx":29 + * cdef unsigned int N = boxes.shape[0] + * cdef unsigned int K = query_boxes.shape[0] + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef DTYPE_t iw, ih, box_area + * cdef DTYPE_t ua + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyInt_From_unsigned_int(__pyx_v_K); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 29, __pyx_L1_error) + __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_overlaps = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 29, __pyx_L1_error) + } else {__pyx_pybuffernd_overlaps.diminfo[0].strides = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_overlaps.diminfo[0].shape = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.shape[0]; 
__pyx_pybuffernd_overlaps.diminfo[1].strides = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_overlaps.diminfo[1].shape = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_5 = 0; + __pyx_v_overlaps = ((PyArrayObject *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "box_overlaps.pyx":33 + * cdef DTYPE_t ua + * cdef unsigned int k, n + * for k in range(K): # <<<<<<<<<<<<<< + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + */ + __pyx_t_6 = __pyx_v_K; + __pyx_t_7 = __pyx_t_6; + for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { + __pyx_v_k = __pyx_t_8; + + /* "box_overlaps.pyx":35 + * for k in range(K): + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * # <<<<<<<<<<<<<< + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + */ + __pyx_t_9 = __pyx_v_k; + __pyx_t_10 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_10 < 0) { + __pyx_t_10 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 35, __pyx_L1_error) + } + __pyx_t_12 = __pyx_v_k; + __pyx_t_13 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_13 < 0) { + __pyx_t_13 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 35, __pyx_L1_error) + } + + /* "box_overlaps.pyx":36 + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) # <<<<<<<<<<<<<< + * ) + * for n in range(N): + */ + __pyx_t_14 = __pyx_v_k; + __pyx_t_15 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_14 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_15 < 0) { + __pyx_t_15 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 36, __pyx_L1_error) + } + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 36, __pyx_L1_error) + } + + /* "box_overlaps.pyx":35 + * for k in range(K): + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * # <<<<<<<<<<<<<< + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + */ + __pyx_v_box_area = ((((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_query_boxes.diminfo[1].strides)) - 
(*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_query_boxes.diminfo[1].strides))) + 1.0) * (((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_query_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides))) + 1.0)); + + /* "box_overlaps.pyx":38 + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + * for n in range(N): # <<<<<<<<<<<<<< + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - + */ + __pyx_t_18 = __pyx_v_N; + __pyx_t_19 = __pyx_t_18; + for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_n = __pyx_t_20; + + /* "box_overlaps.pyx":40 + * for n in range(N): + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - # <<<<<<<<<<<<<< + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 40, __pyx_L1_error) + } + __pyx_t_21 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 40, __pyx_L1_error) + } + __pyx_t_22 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_21 < __pyx_t_22) != 0)) { + __pyx_t_23 = __pyx_t_21; + } else { + __pyx_t_23 = __pyx_t_22; + } + + /* "box_overlaps.pyx":41 + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - + * max(boxes[n, 0], query_boxes[k, 0]) + 1 # <<<<<<<<<<<<<< + * ) + * if iw > 0: + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 41, __pyx_L1_error) + } + __pyx_t_21 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, 
__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 41, __pyx_L1_error) + } + __pyx_t_22 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_21 > __pyx_t_22) != 0)) { + __pyx_t_24 = __pyx_t_21; + } else { + __pyx_t_24 = __pyx_t_22; + } + + /* "box_overlaps.pyx":40 + * for n in range(N): + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - # <<<<<<<<<<<<<< + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + */ + __pyx_v_iw = ((__pyx_t_23 - __pyx_t_24) + 1.0); + + /* "box_overlaps.pyx":43 + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + */ + __pyx_t_25 = ((__pyx_v_iw > 0.0) != 0); + if (__pyx_t_25) { + + /* "box_overlaps.pyx":45 + * if iw > 0: + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - # <<<<<<<<<<<<<< + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 45, __pyx_L1_error) + } + __pyx_t_24 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 45, __pyx_L1_error) + } + __pyx_t_23 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_24 < __pyx_t_23) != 0)) { + __pyx_t_21 = __pyx_t_24; + } else { + __pyx_t_21 = __pyx_t_23; + } + + /* "box_overlaps.pyx":46 + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + * max(boxes[n, 1], query_boxes[k, 1]) + 1 # <<<<<<<<<<<<<< + * ) + * if ih > 0: + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) 
__pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 46, __pyx_L1_error) + } + __pyx_t_24 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 46, __pyx_L1_error) + } + __pyx_t_23 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_24 > __pyx_t_23) != 0)) { + __pyx_t_22 = __pyx_t_24; + } else { + __pyx_t_22 = __pyx_t_23; + } + + /* "box_overlaps.pyx":45 + * if iw > 0: + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - # <<<<<<<<<<<<<< + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + */ + __pyx_v_ih = ((__pyx_t_21 - __pyx_t_22) + 1.0); + + /* "box_overlaps.pyx":48 + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + */ + __pyx_t_25 = ((__pyx_v_ih > 0.0) != 0); + if (__pyx_t_25) { + + /* "box_overlaps.pyx":50 + * if ih > 0: + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * # <<<<<<<<<<<<<< + * (boxes[n, 3] - boxes[n, 1] + 1) + + * box_area - iw * ih + */ + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 50, __pyx_L1_error) + } + __pyx_t_14 = __pyx_v_n; + __pyx_t_15 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_14 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_15 < 0) { + __pyx_t_15 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 50, __pyx_L1_error) + } + + /* "box_overlaps.pyx":51 + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + * (boxes[n, 3] - boxes[n, 1] + 1) + # <<<<<<<<<<<<<< + * box_area - iw * ih + * ) + */ + __pyx_t_12 = __pyx_v_n; + __pyx_t_13 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_13 < 0) { + __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; + } else if 
(unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 51, __pyx_L1_error) + } + __pyx_t_9 = __pyx_v_n; + __pyx_t_10 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_10 < 0) { + __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 51, __pyx_L1_error) + } + + /* "box_overlaps.pyx":49 + * ) + * if ih > 0: + * ua = float( # <<<<<<<<<<<<<< + * (boxes[n, 2] - boxes[n, 0] + 1) * + * (boxes[n, 3] - boxes[n, 1] + 1) + + */ + __pyx_v_ua = ((double)((((((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_boxes.diminfo[1].strides))) + 1.0) * (((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))) + 1.0)) + __pyx_v_box_area) - (__pyx_v_iw * __pyx_v_ih))); + + /* "box_overlaps.pyx":54 + * box_area - iw * ih + * ) + * overlaps[n, k] = iw * ih / ua # <<<<<<<<<<<<<< + * return overlaps + */ + __pyx_t_22 = (__pyx_v_iw * __pyx_v_ih); + if (unlikely(__pyx_v_ua == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 54, __pyx_L1_error) + } + __pyx_t_9 = __pyx_v_n; + __pyx_t_12 = __pyx_v_k; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_overlaps.diminfo[0].shape)) __pyx_t_11 = 0; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_overlaps.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 54, __pyx_L1_error) + } + *__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_overlaps.diminfo[0].strides, __pyx_t_12, __pyx_pybuffernd_overlaps.diminfo[1].strides) = (__pyx_t_22 / __pyx_v_ua); + + /* "box_overlaps.pyx":48 + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + */ + } + + /* "box_overlaps.pyx":43 + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + */ + } + } + } + + /* "box_overlaps.pyx":55 + * ) + * overlaps[n, k] = iw * ih / ua + * return overlaps # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_overlaps)); + __pyx_r = ((PyObject *)__pyx_v_overlaps); + goto __pyx_L0; + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("bbox.bbox_overlaps", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_overlaps); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":742 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":743 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 743, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":742 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":745 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":746 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void 
*)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":745 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":748 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":749 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 749, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":748 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":751 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":752 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + 
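+ /* For reference, the box_overlaps.pyx source that the bbox_overlaps block
+  * above was generated from can be pieced together from the fragments quoted
+  * in its comments. The arithmetic below is copied from those quoted
+  * fragments; the cdef declarations and loop scaffolding are assumptions
+  * based on the standard Fast R-CNN bbox_overlaps kernel, so treat this as
+  * a sketch rather than the literal file contents:
+  *
+  *     def bbox_overlaps(
+  *             np.ndarray[DTYPE_t, ndim=2] boxes,
+  *             np.ndarray[DTYPE_t, ndim=2] query_boxes):
+  *         cdef unsigned int N = boxes.shape[0]
+  *         cdef unsigned int K = query_boxes.shape[0]
+  *         cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
+  *         cdef DTYPE_t iw, ih, box_area, ua
+  *         cdef unsigned int k, n
+  *         for k in range(K):
+  *             # area of query box k (pixel-inclusive, hence the +1 terms)
+  *             box_area = (
+  *                 (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
+  *                 (query_boxes[k, 3] - query_boxes[k, 1] + 1)
+  *             )
+  *             for n in range(N):
+  *                 # intersection width
+  *                 iw = (
+  *                     min(boxes[n, 2], query_boxes[k, 2]) -
+  *                     max(boxes[n, 0], query_boxes[k, 0]) + 1
+  *                 )
+  *                 if iw > 0:
+  *                     # intersection height
+  *                     ih = (
+  *                         min(boxes[n, 3], query_boxes[k, 3]) -
+  *                         max(boxes[n, 1], query_boxes[k, 1]) + 1
+  *                     )
+  *                     if ih > 0:
+  *                         # union area = area(n) + area(k) - intersection
+  *                         ua = float(
+  *                             (boxes[n, 2] - boxes[n, 0] + 1) *
+  *                             (boxes[n, 3] - boxes[n, 1] + 1) +
+  *                             box_area - iw * ih
+  *                         )
+  *                         # intersection-over-union of box n with query box k
+  *                         overlaps[n, k] = iw * ih / ua
+  *         return overlaps
+  */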
__pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":751 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":754 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":755 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 755, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":754 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":757 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":758 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":759 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = 
((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":758 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":761 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":757 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":763 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. + */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":768 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":769 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":772 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 772, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 772, __pyx_L1_error) + #else + __pyx_t_3 = 
PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 772, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":773 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 773, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 773, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":774 + * for childname in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 774, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 774, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":776 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":777 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format 
string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 777, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":776 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":780 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":781 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 781, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 781, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":791 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # 
"x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":792 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":793 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":794 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":796 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":798 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":799 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 799, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":800 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":801 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 801, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":800 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + 
} + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":804 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":805 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":806 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":807 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":808 + * elif t == NPY_SHORT: f[0] = 
104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":809 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":810 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":811 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":812 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * 
elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":813 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":814 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":815 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":816 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; 
f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":817 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":818 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":819 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + 
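+ /* The three complex branches (NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE)
+  * write two format characters -- 'Z' (ASCII 90) followed by the base code
+  * 'f' (0x66), 'd' (0x64), or 'g' (0x67) -- so each advances f by one extra
+  * byte here before the shared "f += 1" that follows the whole chain. */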
__pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":820 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (likely(__pyx_t_6)) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":822 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 822, __pyx_L1_error) + } + __pyx_L15:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":823 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":798 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":827 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 827, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":772 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":828 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":763 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get 
format + * # string. The new location in the format string is returned. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":943 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":944 + * + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< + * PyArray_SetBaseObject(arr, base) + * + */ + Py_INCREF(__pyx_v_base); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":945 + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":943 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! 
+ * PyArray_SetBaseObject(arr, base) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":947 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_v_base; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":948 + * + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< + * if base is NULL: + * return None + */ + __pyx_v_base = PyArray_BASE(__pyx_v_arr); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":949 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + __pyx_t_1 = ((__pyx_v_base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":950 + * base = PyArray_BASE(arr) + * if base is NULL: + * return None # <<<<<<<<<<<<<< + * return base + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":949 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":951 + * if base is NULL: + * return None + * return base # <<<<<<<<<<<<<< + * + * # Versions of the import_* functions which are more suitable for + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_base)); + __pyx_r = ((PyObject *)__pyx_v_base); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":947 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":955 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":957 + * cdef inline int import_array() except -1: + * try: + * __pyx_import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":958 + * try: + * __pyx_import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 958, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":959 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 959, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 959, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":955 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":961 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":963 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 963, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":964 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 964, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":965 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 965, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 965, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + 
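+ /* The label below is Cython's standard generated try/except epilogue:
+  * __Pyx_ExceptionSave captured any in-flight exception state before the
+  * try body ran, and __Pyx_ExceptionReset restores that saved state on the
+  * error path, so the ImportError raised above propagates to the caller
+  * without clobbering an exception that was already being handled. */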
__pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":961 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":969 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 969, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":970 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 970, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* 
"../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":971 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef extern from *: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 971, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 971, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_bbox(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_bbox}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "bbox", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_n_s_K, __pyx_k_K, sizeof(__pyx_k_K), 0, 0, 1, 1}, + {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, 
__pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_bbox, __pyx_k_bbox, sizeof(__pyx_k_bbox), 0, 0, 1, 1}, + {&__pyx_n_s_bbox_overlaps, __pyx_k_bbox_overlaps, sizeof(__pyx_k_bbox_overlaps), 0, 0, 1, 1}, + {&__pyx_n_s_box_area, __pyx_k_box_area, sizeof(__pyx_k_box_area), 0, 0, 1, 1}, + {&__pyx_kp_s_box_overlaps_pyx, __pyx_k_box_overlaps_pyx, sizeof(__pyx_k_box_overlaps_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_ih, __pyx_k_ih, sizeof(__pyx_k_ih), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_iw, __pyx_k_iw, sizeof(__pyx_k_iw), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_overlaps, __pyx_k_overlaps, sizeof(__pyx_k_overlaps), 0, 0, 1, 1}, + {&__pyx_n_s_query_boxes, __pyx_k_query_boxes, sizeof(__pyx_k_query_boxes), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_ua, __pyx_k_ua, sizeof(__pyx_k_ua), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 33, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 781, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 959, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":777 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if 
(unlikely(!__pyx_tuple_)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":781 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 781, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":801 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":959 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 959, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":965 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 965, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + __pyx_tuple__6 = PyTuple_Pack(11, __pyx_n_s_boxes, __pyx_n_s_query_boxes, __pyx_n_s_N, __pyx_n_s_K, __pyx_n_s_overlaps, __pyx_n_s_iw, __pyx_n_s_ih, __pyx_n_s_box_area, __pyx_n_s_ua, __pyx_n_s_k, __pyx_n_s_n); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(2, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_overlaps_pyx, __pyx_n_s_bbox_overlaps, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int 
__Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 207, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 230, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 234, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 246, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 839, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + 
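/* For bbox the variable/function export and import phases are all
+ * empty; the only modinit phase with real work here is
+ * __Pyx_modinit_type_import_code above, which binds numpy's dtype,
+ * flatiter, broadcast, ndarray and ufunc extension types. */
+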
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initbbox(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initbbox(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_bbox(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_bbox(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_bbox(PyObject 
*__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'bbox' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_bbox(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("bbox", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_bbox) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "bbox")) { + if (unlikely(PyDict_SetItemString(modules, "bbox", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "box_overlaps.pyx":9 + * + * cimport cython + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "box_overlaps.pyx":12 + * cimport numpy as np + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4bbox_1bbox_overlaps, NULL, __pyx_n_s_bbox); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_bbox_overlaps, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "box_overlaps.pyx":1 + * # -------------------------------------------------------- # <<<<<<<<<<<<<< + * # Fast R-CNN + * # Copyright (c) 2015 Microsoft + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise 
ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init bbox", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init bbox"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': 
return (is_complex ? "'complex float'" : "'float'");
+ case 'd': return (is_complex ? "'complex double'" : "'double'");
+ case 'g': return (is_complex ? "'complex long double'" : "'long double'");
+ case 'T': return "a struct";
+ case 'O': return "Python object";
+ case 'P': return "a pointer";
+ case 's': case 'p': return "a string";
+ case 0: return "end";
+ default: return "unparseable format string";
+ }
+}
+static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
+ switch (ch) {
+ case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+ case 'h': case 'H': return 2;
+ case 'i': case 'I': case 'l': case 'L': return 4;
+ case 'q': case 'Q': return 8;
+ case 'f': return (is_complex ? 8 : 4);
+ case 'd': return (is_complex ? 16 : 8);
+ case 'g': {
+ PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
+ return 0;
+ }
+ case 'O': case 'P': return sizeof(void*);
+ default:
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+}
+static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
+ switch (ch) {
+ case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+ case 'h': case 'H': return sizeof(short);
+ case 'i': case 'I': return sizeof(int);
+ case 'l': case 'L': return sizeof(long);
+ #ifdef HAVE_LONG_LONG
+ case 'q': case 'Q': return sizeof(PY_LONG_LONG);
+ #endif
+ case 'f': return sizeof(float) * (is_complex ? 2 : 1);
+ case 'd': return sizeof(double) * (is_complex ? 2 : 1);
+ case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
+ case 'O': case 'P': return sizeof(void*);
+ default: {
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+ }
+}
+typedef struct { char c; short x; } __Pyx_st_short;
+typedef struct { char c; int x; } __Pyx_st_int;
+typedef struct { char c; long x; } __Pyx_st_long;
+typedef struct { char c; float x; } __Pyx_st_float;
+typedef struct { char c; double x; } __Pyx_st_double;
+typedef struct { char c; long double x; } __Pyx_st_longdouble;
+typedef struct { char c; void *x; } __Pyx_st_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
+ switch (ch) {
+ case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+ case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
+ case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
+ case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+ case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
+#endif
+ case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
+ case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
+ case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
+ case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
+ default:
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+}
+/* These are for computing the padding at the end of the struct to align
+ on the first member of the struct. This will probably be the same as above,
+ but we don't have any guarantees.
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
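/* every declared dimension matched the '(d1,d2,...)' array spec in the
+ * format string, so mark the element as a validated array item */
+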
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == 
__Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void __Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((size_t)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* PyDictVersioning */ + #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ + #if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* BufferIndexError */ + static void __Pyx_RaiseBufferIndexError(int axis) { + PyErr_Format(PyExc_IndexError, + "Out of bounds on buffer access (axis %d)", axis); +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* DictGetItem */ + #if PY_MAJOR_VERSION >= 3 && 
!CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + if (unlikely(PyTuple_Check(key))) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) { + PyErr_SetObject(PyExc_KeyError, args); + Py_DECREF(args); + } + } else { + PyErr_SetObject(PyExc_KeyError, key); + } + } + return NULL; + } + Py_INCREF(value); + return value; +} +#endif + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; 
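+ /* a tuple value is used unchanged as the argument tuple when
+  * the exception class is instantiated below */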
+ } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyCFunctionFastCall */ + #if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ + #if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. 
+ */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCallMethO */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + 
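+    /* PyTuple_SET_ITEM steals a reference, so the argument is INCREF'd before being stored */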
+    Py_INCREF(arg);
+    PyTuple_SET_ITEM(args, 0, arg);
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+    if (PyFunction_Check(func)) {
+        return __Pyx_PyFunction_FastCall(func, &arg, 1);
+    }
+#endif
+    if (likely(PyCFunction_Check(func))) {
+        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+            return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+        }
+    }
+    return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+    PyObject *result;
+    PyObject *args = PyTuple_Pack(1, arg);
+    if (unlikely(!args)) return NULL;
+    result = __Pyx_PyObject_Call(func, args, NULL);
+    Py_DECREF(args);
+    return result;
+}
+#endif
+
+/* GetTopmostException */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem *
+__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
+{
+    _PyErr_StackItem *exc_info = tstate->exc_info;
+    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
+           exc_info->previous_item != NULL)
+    {
+        exc_info = exc_info->previous_item;
+    }
+    return exc_info;
+}
+#endif
+
+/* SaveResetException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+    #if CYTHON_USE_EXC_INFO_STACK
+    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+    *type = exc_info->exc_type;
+    *value = exc_info->exc_value;
+    *tb = exc_info->exc_traceback;
+    #else
+    *type = tstate->exc_type;
+    *value = tstate->exc_value;
+    *tb = tstate->exc_traceback;
+    #endif
+    Py_XINCREF(*type);
+    Py_XINCREF(*value);
+    Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    #if CYTHON_USE_EXC_INFO_STACK
+    _PyErr_StackItem *exc_info = tstate->exc_info;
+    tmp_type = exc_info->exc_type;
+    tmp_value = exc_info->exc_value;
+    tmp_tb = exc_info->exc_traceback;
+    exc_info->exc_type = type;
+    exc_info->exc_value = value;
+    exc_info->exc_traceback = tb;
+    #else
+    tmp_type = tstate->exc_type;
+    tmp_value = tstate->exc_value;
+    tmp_tb = tstate->exc_traceback;
+    tstate->exc_type = type;
+    tstate->exc_value = value;
+    tstate->exc_traceback = tb;
+    #endif
+    Py_XDECREF(tmp_type);
+    Py_XDECREF(tmp_value);
+    Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+    PyObject *exc_type = tstate->curexc_type;
+    if (exc_type == err) return 1;
+    if (unlikely(!exc_type)) return 0;
+    if (unlikely(PyTuple_Check(err)))
+        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetException */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
+#endif
+{
+    PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    local_type = tstate->curexc_type;
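+    /* value and traceback are taken the same way below, then the curexc slots are
+       cleared: ownership of the live exception transfers to the locals */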
local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { + const unsigned int neg_one = (unsigned int) ((unsigned int) 0 - (unsigned int) 1), const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define 
__PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = (float)(1.0) / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = (float)(1.0) / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + 
z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + 
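+            /* b == 0 here: dividing through anyway yields IEEE inf/NaN components rather than raising */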
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = (double)(1.0) / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = (double)(1.0) / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) ((unsigned int) 0 - (unsigned int) 1), const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | 
(unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + 
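+                /* the fast digit path could not prove the value fits in a C int;
+                   fall through to the generic PyLong conversion below */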
break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << 
PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* 
CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + 
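+                        /* recombine the two PyLong digits (15- or 30-bit each, per PyLong_SHIFT)
+                           into an unsigned long, then range-check via __PYX_VERIFY_RETURN_INT */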
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + 
"value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; ip) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = 
PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/widerface_evaluate/evaluation.py b/widerface_evaluate/evaluation.py new file mode 100644 index 0000000..554f278 --- /dev/null +++ b/widerface_evaluate/evaluation.py @@ -0,0 +1,303 @@ +""" +WiderFace evaluation code +author: wondervictor +mail: tianhengcheng@gmail.com +copyright@wondervictor +""" + +import os +import tqdm +import pickle +import argparse +import numpy as np +from scipy.io import loadmat +from bbox import bbox_overlaps +from IPython import embed + + +def get_gt_boxes(gt_dir): + """ gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)""" + + gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat')) + hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat')) + medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat')) + easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat')) + + facebox_list = gt_mat['face_bbx_list'] + event_list = gt_mat['event_list'] + file_list = gt_mat['file_list'] + + hard_gt_list = hard_mat['gt_list'] + medium_gt_list = medium_mat['gt_list'] + easy_gt_list = easy_mat['gt_list'] + + return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list + + +def get_gt_boxes_from_txt(gt_path, cache_dir): + + cache_file = os.path.join(cache_dir, 'gt_cache.pkl') + if os.path.exists(cache_file): + f = open(cache_file, 'rb') + boxes = pickle.load(f) + f.close() + return boxes + + f = open(gt_path, 'r') + state = 0 + lines = f.readlines() + lines = list(map(lambda x: x.rstrip('\r\n'), lines)) + boxes = {} + print(len(lines)) + f.close() + current_boxes = [] + current_name = None + for line in lines: + if state == 0 and '--' in line: + state = 1 + current_name = line + continue + if state == 1: + state = 2 + continue + + if state == 2 and '--' in line: + state = 1 + boxes[current_name] = np.array(current_boxes).astype('float32') + current_name = line + current_boxes = [] + continue + + if state == 2: + box = [float(x) for x in line.split(' ')[:4]] + current_boxes.append(box) + continue + + f = open(cache_file, 'wb') + pickle.dump(boxes, f) + f.close() + return boxes + + +def read_pred_file(filepath): + + with open(filepath, 'r') as f: + lines = f.readlines() + img_file = lines[0].rstrip('\n\r') + lines = lines[2:] + + # b = lines[0].rstrip('\r\n').split(' ')[:-1] + # c = float(b) + # a = map(lambda x: [[float(a[0]), float(a[1]), float(a[2]), float(a[3]), float(a[4])] for a in x.rstrip('\r\n').split(' ')], lines) + boxes = [] + for line in lines: + line = line.rstrip('\r\n').split(' ') + if line[0] == '': + continue + # a = float(line[4]) + boxes.append([float(line[0]), float(line[1]), float(line[2]), float(line[3]), float(line[4])]) + boxes = np.array(boxes) + # boxes = np.array(list(map(lambda x: [float(a) for a in x.rstrip('\r\n').split(' ')], lines))).astype('float') + return img_file.split('/')[-1], boxes + + +def get_preds(pred_dir): + events = os.listdir(pred_dir) + boxes = dict() + pbar = tqdm.tqdm(events) + + for event in pbar: + pbar.set_description('Reading Predictions ') + event_dir = os.path.join(pred_dir, event) + event_images = os.listdir(event_dir) + current_event = dict() + for imgtxt in event_images: + imgname, _boxes = read_pred_file(os.path.join(event_dir, imgtxt)) + current_event[imgname.rstrip('.jpg')] = _boxes + boxes[event] = current_event + return boxes + + +def 
norm_score(pred): + """ min-max normalize detection scores in place + pred: {event: {image_name: [[x1,y1,x2,y2,score], ...]}} + """ + + max_score = 0 + min_score = 1 + + for _, k in pred.items(): + for _, v in k.items(): + if len(v) == 0: + continue + _min = np.min(v[:, -1]) + _max = np.max(v[:, -1]) + max_score = max(_max, max_score) + min_score = min(_min, min_score) + + diff = max_score - min_score + if diff == 0: # all scores identical: avoid dividing by zero + diff = 1 + for _, k in pred.items(): + for _, v in k.items(): + if len(v) == 0: + continue + v[:, -1] = (v[:, -1] - min_score)/diff + + +def image_eval(pred, gt, ignore, iou_thresh): + """ single image evaluation + pred: Nx5 boxes, [x, y, w, h, score] + gt: Nx4 boxes, [x, y, w, h] + ignore: per-gt flags, 1 for gt boxes that count, 0 for ignored ones + """ + + _pred = pred.copy() + _gt = gt.copy() + pred_recall = np.zeros(_pred.shape[0]) + recall_list = np.zeros(_gt.shape[0]) + proposal_list = np.ones(_pred.shape[0]) + + # convert [x, y, w, h] to [x1, y1, x2, y2] for the IoU computation + _pred[:, 2] = _pred[:, 2] + _pred[:, 0] + _pred[:, 3] = _pred[:, 3] + _pred[:, 1] + _gt[:, 2] = _gt[:, 2] + _gt[:, 0] + _gt[:, 3] = _gt[:, 3] + _gt[:, 1] + + overlaps = bbox_overlaps(_pred[:, :4], _gt) + + # match each prediction to its best-overlapping gt; matches to ignored gt + # are excluded from the proposal count, first matches to counted gt recall it + for h in range(_pred.shape[0]): + + gt_overlap = overlaps[h] + max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax() + if max_overlap >= iou_thresh: + if ignore[max_idx] == 0: + recall_list[max_idx] = -1 + proposal_list[h] = -1 + elif recall_list[max_idx] == 0: + recall_list[max_idx] = 1 + + r_keep_index = np.where(recall_list == 1)[0] + pred_recall[h] = len(r_keep_index) + return pred_recall, proposal_list + + +def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall): + # per-image proposal/recall counts at thresh_num evenly spaced score thresholds + pr_info = np.zeros((thresh_num, 2)).astype('float') + for t in range(thresh_num): + + thresh = 1 - (t+1)/thresh_num + r_index = np.where(pred_info[:, 4] >= thresh)[0] + if len(r_index) == 0: + pr_info[t, 0] = 0 + pr_info[t, 1] = 0 + else: + r_index = r_index[-1] + p_index = np.where(proposal_list[:r_index+1] == 1)[0] + pr_info[t, 0] = len(p_index) + pr_info[t, 1] = pred_recall[r_index] + return pr_info + + +def dataset_pr_info(thresh_num, pr_curve, count_face): + _pr_curve = np.zeros((thresh_num, 2)) + for i in range(thresh_num): + _pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0] # precision + _pr_curve[i, 1] = pr_curve[i, 1] / count_face # recall + return _pr_curve + + +def voc_ap(rec, prec): + + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.], rec, [1.])) + mpre = np.concatenate(([0.], prec, [0.])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def evaluation(pred, gt_path, iou_thresh=0.5): + pred = get_preds(pred) + norm_score(pred) + facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path) + event_num = len(event_list) + thresh_num = 1000 + settings = ['easy', 'medium', 'hard'] + setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list] + aps = [] + for setting_id in range(3): + # different setting + gt_list = setting_gts[setting_id] + count_face = 0 + pr_curve = np.zeros((thresh_num, 2)).astype('float') + # [hard, medium, easy] + pbar = tqdm.tqdm(range(event_num)) + for i in pbar: + pbar.set_description('Processing {}'.format(settings[setting_id])) + event_name = str(event_list[i][0][0]) + img_list = file_list[i][0] + pred_list = pred[event_name] + sub_gt_list = gt_list[i][0] + # img_pr_info_list = np.zeros((len(img_list), thresh_num, 2)) + gt_bbx_list =
facebox_list[i][0] + + for j in range(len(img_list)): + pred_info = pred_list[str(img_list[j][0][0])] + + gt_boxes = gt_bbx_list[j][0].astype('float') + keep_index = sub_gt_list[j][0] + count_face += len(keep_index) + + if len(gt_boxes) == 0 or len(pred_info) == 0: + continue + ignore = np.zeros(gt_boxes.shape[0]) + if len(keep_index) != 0: + ignore[keep_index-1] = 1 + pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh) + + _img_pr_info = img_pr_info(thresh_num, pred_info, proposal_list, pred_recall) + + pr_curve += _img_pr_info + pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face) + + propose = pr_curve[:, 0] + recall = pr_curve[:, 1] + + ap = voc_ap(recall, propose) + aps.append(ap) + + print("==================== Results ====================") + print("Easy Val AP: {}".format(aps[0])) + print("Medium Val AP: {}".format(aps[1])) + print("Hard Val AP: {}".format(aps[2])) + print("=================================================") + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--pred', default="./widerface_txt/") + parser.add_argument('-g', '--gt', default='./ground_truth/') + + args = parser.parse_args() + evaluation(args.pred, args.gt) + + + + + + + + + + + + diff --git a/widerface_evaluate/setup.py b/widerface_evaluate/setup.py new file mode 100644 index 0000000..74dba05 --- /dev/null +++ b/widerface_evaluate/setup.py @@ -0,0 +1,13 @@ +""" +WiderFace evaluation code +author: wondervictor +mail: tianhengcheng@gmail.com +copyright@wondervictor +""" + +from distutils.core import setup, Extension +from Cython.Build import cythonize +import numpy + +package = Extension('bbox', ['box_overlaps.pyx'], include_dirs=[numpy.get_include()]) +setup(ext_modules=cythonize([package]))
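A quick way to sanity-check the AP integration in evaluation.py, without the compiled `bbox` extension or the WiderFace `.mat` ground truth, is to run `voc_ap` on a toy precision/recall curve. The sketch below is illustrative only (the toy numbers are invented); it duplicates the `voc_ap` definition from evaluation.py so it runs standalone:

```
# Standalone sanity check for the VOC-style AP integration used in
# evaluation.py. Illustrative only: the toy numbers below are invented.
import numpy as np

def voc_ap(rec, prec):
    # Same routine as in evaluation.py: append sentinels, build the
    # monotone precision envelope, then integrate over recall steps.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

if __name__ == '__main__':
    recall = np.array([0.2, 0.4, 0.4, 0.8, 1.0])     # non-decreasing
    precision = np.array([1.0, 0.9, 0.6, 0.7, 0.5])  # raw, not yet monotone
    print('AP on toy curve: {:.4f}'.format(voc_ap(recall, precision)))  # 0.7600
```

Building the extension itself follows the usual Cython workflow: `python setup.py build_ext --inplace` inside `widerface_evaluate/` produces the `bbox` module that evaluation.py imports, after which `python evaluation.py -p ./widerface_txt/ -g ./ground_truth/` (the argparse defaults above) runs the easy/medium/hard evaluation.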