diff --git a/app/api/business/project_api.py b/app/api/business/project_train_api.py
similarity index 96%
rename from app/api/business/project_api.py
rename to app/api/business/project_train_api.py
index a3da4ff..4f6991b 100644
--- a/app/api/business/project_api.py
+++ b/app/api/business/project_train_api.py
@@ -30,6 +30,12 @@ def get_type_list(session: Session = Depends(get_db)):
@project.post("/list")
def project_pager(info: ProjectInfoPager, session: Session = Depends(get_db)):
+    """
+    Paged query of the project list
+    :param info: paging parameters and filter conditions
+    :param session: database session
+    :return: a page of project records
+    """
pager = pic.get_project_pager(info, session)
return rc.response_success_pager(pager)
@@ -120,7 +126,9 @@ def del_label(label_id: int, session: Session = Depends(get_db)):
@project.post("/up_proj_img")
-def upload_project_image(project_id: int = Form(...), files: List[UploadFile] = File(...), session: Session = Depends(get_db)):
+def upload_project_image(project_id: int = Form(...),
+ files: List[UploadFile] = File(...),
+ session: Session = Depends(get_db)):
"""
     Upload project images
     :param files: image files
diff --git a/app/api/common/view_img.py b/app/api/common/view_img.py
index 3e0ccd4..f6f5a04 100644
--- a/app/api/common/view_img.py
+++ b/app/api/common/view_img.py
@@ -1,20 +1,41 @@
import os
-from fastapi import APIRouter, HTTPException
+from fastapi import APIRouter, HTTPException, Depends
from starlette.responses import FileResponse
+from sqlalchemy.orm import Session
+from app.model.crud.project_image_crud import get_img_url
from app.config.config_reader import images_url
+from app.db.db_session import get_db
view = APIRouter()
-@view.get("/{file_path:path}")
-def view_img(file_path):
+@view.get("/view_img/{image_id}")
+def view_img(image_id: int, session: Session = Depends(get_db)):
"""
     View the source image
-    :param file_path: image path
+    :param session: database session
+    :param image_id: image id
:return:
"""
- image_path = os.path.join(images_url, file_path)
+ sour_url, thumb_url = get_img_url(image_id, session)
+    image_path = os.path.join(images_url, sour_url) if sour_url else ""
+    # Check that the file exists and is a regular file (missing record -> empty path)
+ if not os.path.isfile(image_path):
+ raise HTTPException(status_code=404, detail="Image not found")
+ return FileResponse(image_path, media_type='image/jpeg')
+
+
+@view.get("/view_thumb/{image_id}")
+def view_thumb(image_id: int, session: Session = Depends(get_db)):
+ """
+    View the thumbnail image
+    :param session: database session
+    :param image_id: image id
+ :return:
+ """
+ sour_url, thumb_url = get_img_url(image_id, session)
+    image_path = os.path.join(images_url, thumb_url) if thumb_url else ""
     # Check that the file exists and is a regular file
if not os.path.isfile(image_path):
raise HTTPException(status_code=404, detail="Image not found")
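# NOTE (review): with the view router mounted without a prefix in app.py below,
# these endpoints resolve to /view_img/{image_id} and /view_thumb/{image_id}.
# Minimal client sketch; the host, port, and image id are illustrative, not part
# of this change:
import requests

resp = requests.get("http://localhost:8000/view_img/1")  # placeholder image id
if resp.status_code == 200:
    with open("image.jpg", "wb") as f:
        f.write(resp.content)  # full-size image bytes, served as image/jpeg
else:
    print(resp.status_code)  # 404 when the record or the file is missing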
diff --git a/app/application/app.py b/app/application/app.py
index 143b7b8..a22bc2a 100644
--- a/app/application/app.py
+++ b/app/application/app.py
@@ -7,7 +7,7 @@ from app.application.logger_middleware import LoggerMiddleware
from app.api.common.upload_file import upload_files
from app.api.sys.login_api import login
from app.api.sys.sys_user_api import user
-from app.api.business.project_api import project
+from app.api.business.project_train_api import project
from app.api.common.view_img import view
my_app = FastAPI()
@@ -32,7 +32,7 @@ my_app.add_middleware(TokenMiddleware)
 my_app.include_router(login, prefix="/login", tags=["User login API"])
 my_app.include_router(upload_files, prefix="/upload", tags=["File upload API"])
-my_app.include_router(view, prefix="/view_img", tags=["View images"])
+my_app.include_router(view, tags=["View images"])
 my_app.include_router(user, prefix="/user", tags=["User management API"])
 my_app.include_router(project, prefix="/proj", tags=["Project management API"])
diff --git a/app/application/token_middleware.py b/app/application/token_middleware.py
index 5d92d71..e1e1349 100644
--- a/app/application/token_middleware.py
+++ b/app/application/token_middleware.py
@@ -5,6 +5,15 @@ from jwt import PyJWTError
from app.common import reponse_code as rc
from app.common import jwt_check as jc
+green = ['/login', '/view_img', '/view_thumb']
+
+
+def check_green(s: str):
+ for url in green:
+ if url in s:
+ return True
+ return False
+
class TokenMiddleware(BaseHTTPMiddleware):
@@ -33,11 +42,3 @@ class TokenMiddleware(BaseHTTPMiddleware):
         return rc.response_code_view(status.HTTP_401_UNAUTHORIZED, "Token invalid or expired, please re-authenticate")
-green = ['/login', '/view_img', 'test']
-
-
-def check_green(s: str):
- for url in green:
- if url in s:
- return True
- return False
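# NOTE (review): check_green matches by substring ('url in s'), so any request
# path that merely contains a whitelisted fragment skips token validation,
# e.g. '/user/view_img_admin'. A stricter prefix-matching sketch (a suggestion
# only; check_green_strict is a hypothetical name, not part of this change):
def check_green_strict(path: str) -> bool:
    # Pass only when the request path actually starts with a whitelisted prefix.
    return any(path.startswith(url) for url in green)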
diff --git a/app/model/crud/project_image_crud.py b/app/model/crud/project_image_crud.py
index 8f51ce0..18e4d8b 100644
--- a/app/model/crud/project_image_crud.py
+++ b/app/model/crud/project_image_crud.py
@@ -14,6 +14,19 @@ def get_image_pager(image: ProjectImagePager, session: Session):
return pager
+def get_img_url(image_id: int, session: Session):
+ """
+    Get an image's source and thumbnail URLs by id
+    :param image_id: image id
+    :param session: database session
+    :return: (source url, thumbnail url), or (None, None) if no record matches
+    """
+    result = session.query(piModel).filter_by(id=image_id).first()
+    if result is None:  # no project_image row with this id
+        return None, None
+    return result.image_url, result.thumb_image_url
+
+
def get_image_list(project_id: int, session: Session):
query = session.query(piModel).filter(piModel.project_id == project_id).order_by(asc(piModel.id))
image_list = [ProjectImage.from_orm(image).dict() for image in query.all()]
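# NOTE (review): with the None guard above, get_img_url yields (None, None) for an
# unknown id, so callers can treat a missing record like a missing file. Usage
# sketch, assuming get_db is the usual yield-based session dependency and
# demo_lookup is a hypothetical helper, not part of this change:
from fastapi import HTTPException

from app.db.db_session import get_db
from app.model.crud.project_image_crud import get_img_url


def demo_lookup(image_id: int):
    session = next(get_db())  # drive the generator manually outside Depends()
    sour_url, thumb_url = get_img_url(image_id, session)
    if sour_url is None:
        raise HTTPException(status_code=404, detail="Image not found")
    return sour_url, thumb_url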
diff --git a/test.py b/test.py
deleted file mode 100644
index 35abe97..0000000
--- a/test.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import subprocess
-
-
-def main():
- data = 'D:\\syg\\yolov5\\datasets\\hqjvaj\\v1\\hqjvaj.yaml'
- project = 'D:\\syg\\yolov5\\runs\\hqjvaj\\train'
- name = 'v1'
- epochs = 10
- yolo_path = 'yolov5/train.py'
-
- subprocess.run(["python", yolo_path, "--data=" + data, "--project="
- + project, "--name=" + name, "--epochs=" + str(epochs)], check=True)
-
-
-if __name__ == "__main__":
- """
-    If this script is run directly, call main().
-    If this script is imported by another module, main() is not called automatically.
- """
- main()
\ No newline at end of file
diff --git a/yolov5/.dockerignore b/yolov5/.dockerignore
deleted file mode 100644
index 3b66925..0000000
--- a/yolov5/.dockerignore
+++ /dev/null
@@ -1,222 +0,0 @@
-# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
-.git
-.cache
-.idea
-runs
-output
-coco
-storage.googleapis.com
-
-data/samples/*
-**/results*.csv
-*.jpg
-
-# Neural Network weights -----------------------------------------------------------------------------------------------
-**/*.pt
-**/*.pth
-**/*.onnx
-**/*.engine
-**/*.mlmodel
-**/*.torchscript
-**/*.torchscript.pt
-**/*.tflite
-**/*.h5
-**/*.pb
-*_saved_model/
-*_web_model/
-*_openvino_model/
-
-# Below Copied From .gitignore -----------------------------------------------------------------------------------------
-# Below Copied From .gitignore -----------------------------------------------------------------------------------------
-
-
-# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-wandb/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# dotenv
-.env
-
-# virtualenv
-.venv*
-venv*/
-ENV*/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-
-
-# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
-
-# General
-.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-Icon?
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-
-# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff:
-.idea/*
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/dictionaries
-.html # Bokeh Plots
-.pg # TensorFlow Frozen Graphs
-.avi # videos
-
-# Sensitive or high-churn files:
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-
-# Gradle:
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# CMake
-cmake-build-debug/
-cmake-build-release/
-
-# Mongo Explorer plugin:
-.idea/**/mongoSettings.xml
-
-## File-based project format:
-*.iws
-
-## Plugin-specific files:
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
diff --git a/yolov5/.gitattributes b/yolov5/.gitattributes
deleted file mode 100644
index dad4239..0000000
--- a/yolov5/.gitattributes
+++ /dev/null
@@ -1,2 +0,0 @@
-# this drop notebooks from GitHub language stats
-*.ipynb linguist-vendored
diff --git a/yolov5/.gitignore b/yolov5/.gitignore
deleted file mode 100644
index 7f683c9..0000000
--- a/yolov5/.gitignore
+++ /dev/null
@@ -1,258 +0,0 @@
-# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
-*.jpg
-*.jpeg
-*.png
-*.bmp
-*.tif
-*.tiff
-*.heic
-*.JPG
-*.JPEG
-*.PNG
-*.BMP
-*.TIF
-*.TIFF
-*.HEIC
-*.mp4
-*.mov
-*.MOV
-*.avi
-*.data
-*.json
-*.cfg
-!setup.cfg
-!cfg/yolov3*.cfg
-
-storage.googleapis.com
-../runs/*
-data/*
-data/images/*
-!data/*.yaml
-!data/hyps
-!data/scripts
-!data/images
-!data/images/zidane.jpg
-!data/images/bus.jpg
-!data/*.sh
-
-results*.csv
-
-# Datasets -------------------------------------------------------------------------------------------------------------
-coco/
-coco128/
-VOC/
-
-# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
-*.m~
-*.mat
-!targets*.mat
-
-# Neural Network weights -----------------------------------------------------------------------------------------------
-*.weights
-*.pt
-*.pb
-*.onnx
-*.engine
-*.mlmodel
-*.mlpackage
-*.torchscript
-*.tflite
-*.h5
-*_saved_model/
-*_web_model/
-*_openvino_model/
-*_paddle_model/
-darknet53.conv.74
-yolov3-tiny.conv.15
-
-# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-/wandb/
-.installed.cfg
-*.egg
-
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# dotenv
-.env
-
-# virtualenv
-.venv*
-venv*/
-ENV*/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-
-
-# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
-
-# General
-.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-Icon?
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-
-# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff:
-.idea/*
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/dictionaries
-.html # Bokeh Plots
-.pg # TensorFlow Frozen Graphs
-.avi # videos
-
-# Sensitive or high-churn files:
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-
-# Gradle:
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# CMake
-cmake-build-debug/
-cmake-build-release/
-
-# Mongo Explorer plugin:
-.idea/**/mongoSettings.xml
-
-## File-based project format:
-*.iws
-
-## Plugin-specific files:
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
diff --git a/yolov5/classify/predict.py b/yolov5/classify/predict.py
deleted file mode 100644
index 59db133..0000000
--- a/yolov5/classify/predict.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
-
-Usage - sources:
- $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- screen # screenshot
- path/ # directory
- list.txt # list of images
- list.streams # list of streams
- 'path/*.jpg' # glob
- 'https://youtu.be/LNwODJXcvt4' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
-
-Usage - formats:
- $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch
- yolov5s-cls.torchscript # TorchScript
- yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s-cls_openvino_model # OpenVINO
- yolov5s-cls.engine # TensorRT
- yolov5s-cls.mlmodel # CoreML (macOS-only)
- yolov5s-cls_saved_model # TensorFlow SavedModel
- yolov5s-cls.pb # TensorFlow GraphDef
- yolov5s-cls.tflite # TensorFlow Lite
- yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s-cls_paddle_model # PaddlePaddle
-"""
-
-import argparse
-import os
-import platform
-import sys
-from pathlib import Path
-
-import torch
-import torch.nn.functional as F
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from ultralytics.utils.plotting import Annotator
-
-from models.common import DetectMultiBackend
-from utils.augmentations import classify_transforms
-from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
-from utils.general import (
- LOGGER,
- Profile,
- check_file,
- check_img_size,
- check_imshow,
- check_requirements,
- colorstr,
- cv2,
- increment_path,
- print_args,
- strip_optimizer,
-)
-from utils.torch_utils import select_device, smart_inference_mode
-
-
-@smart_inference_mode()
-def run(
- weights=ROOT / "yolov5s-cls.pt", # model.pt path(s)
- source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
- data=ROOT / "data/coco128.yaml", # dataset.yaml path
- imgsz=(224, 224), # inference size (height, width)
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
- view_img=False, # show results
- save_txt=False, # save results to *.txt
- nosave=False, # do not save images/videos
- augment=False, # augmented inference
- visualize=False, # visualize features
- update=False, # update all models
- project=ROOT / "runs/predict-cls", # save results to project/name
- name="exp", # save results to project/name
- exist_ok=False, # existing project/name ok, do not increment
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- vid_stride=1, # video frame-rate stride
-):
- """Conducts YOLOv5 classification inference on diverse input sources and saves results."""
- source = str(source)
- save_img = not nosave and not source.endswith(".txt") # save inference images
- is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
- webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
- screenshot = source.lower().startswith("screen")
- if is_url and is_file:
- source = check_file(source) # download
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
- device = select_device(device)
- model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
- stride, names, pt = model.stride, model.names, model.pt
- imgsz = check_img_size(imgsz, s=stride) # check image size
-
- # Dataloader
- bs = 1 # batch_size
- if webcam:
- view_img = check_imshow(warn=True)
- dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
- bs = len(dataset)
- elif screenshot:
- dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
- else:
- dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
- vid_path, vid_writer = [None] * bs, [None] * bs
-
- # Run inference
- model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
- seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
- for path, im, im0s, vid_cap, s in dataset:
- with dt[0]:
- im = torch.Tensor(im).to(model.device)
- im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
- if len(im.shape) == 3:
- im = im[None] # expand for batch dim
-
- # Inference
- with dt[1]:
- results = model(im)
-
- # Post-process
- with dt[2]:
- pred = F.softmax(results, dim=1) # probabilities
-
- # Process predictions
- for i, prob in enumerate(pred): # per image
- seen += 1
- if webcam: # batch_size >= 1
- p, im0, frame = path[i], im0s[i].copy(), dataset.count
- s += f"{i}: "
- else:
- p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
-
- p = Path(p) # to Path
- save_path = str(save_dir / p.name) # im.jpg
- txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
-
- s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
- annotator = Annotator(im0, example=str(names), pil=True)
-
- # Print results
- top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices
- s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
-
- # Write results
- text = "\n".join(f"{prob[j]:.2f} {names[j]}" for j in top5i)
- if save_img or view_img: # Add bbox to image
- annotator.text([32, 32], text, txt_color=(255, 255, 255))
- if save_txt: # Write to file
- with open(f"{txt_path}.txt", "a") as f:
- f.write(text + "\n")
-
- # Stream results
- im0 = annotator.result()
- if view_img:
- if platform.system() == "Linux" and p not in windows:
- windows.append(p)
- cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
- cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
- cv2.imshow(str(p), im0)
- cv2.waitKey(1) # 1 millisecond
-
- # Save results (image with detections)
- if save_img:
- if dataset.mode == "image":
- cv2.imwrite(save_path, im0)
- else: # 'video' or 'stream'
- if vid_path[i] != save_path: # new video
- vid_path[i] = save_path
- if isinstance(vid_writer[i], cv2.VideoWriter):
- vid_writer[i].release() # release previous video writer
- if vid_cap: # video
- fps = vid_cap.get(cv2.CAP_PROP_FPS)
- w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- else: # stream
- fps, w, h = 30, im0.shape[1], im0.shape[0]
- save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos
- vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
- vid_writer[i].write(im0)
-
- # Print time (inference-only)
- LOGGER.info(f"{s}{dt[1].dt * 1e3:.1f}ms")
-
- # Print results
- t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
- LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
- if save_txt or save_img:
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
- if update:
- strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
-
-
-def parse_opt():
- """Parses command line arguments for YOLOv5 inference settings including model, source, device, and image size."""
- parser = argparse.ArgumentParser()
- parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model path(s)")
- parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
- parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
- parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[224], help="inference size h,w")
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--view-img", action="store_true", help="show results")
- parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
- parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
- parser.add_argument("--augment", action="store_true", help="augmented inference")
- parser.add_argument("--visualize", action="store_true", help="visualize features")
- parser.add_argument("--update", action="store_true", help="update all models")
- parser.add_argument("--project", default=ROOT / "runs/predict-cls", help="save results to project/name")
- parser.add_argument("--name", default="exp", help="save results to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
- parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
- parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
- opt = parser.parse_args()
- opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
- print_args(vars(opt))
- return opt
-
-
-def main(opt):
- """Executes YOLOv5 model inference with options for ONNX DNN and video frame-rate stride adjustments."""
- check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
- run(**vars(opt))
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
diff --git a/yolov5/classify/train.py b/yolov5/classify/train.py
deleted file mode 100644
index d454c71..0000000
--- a/yolov5/classify/train.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Train a YOLOv5 classifier model on a classification dataset.
-
-Usage - Single-GPU training:
- $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
-
-Usage - Multi-GPU DDP training:
- $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
-
-Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
-YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
-Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-import time
-from copy import deepcopy
-from datetime import datetime
-from pathlib import Path
-
-import torch
-import torch.distributed as dist
-import torch.hub as hub
-import torch.optim.lr_scheduler as lr_scheduler
-import torchvision
-from torch.cuda import amp
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from classify import val as validate
-from models.experimental import attempt_load
-from models.yolo import ClassificationModel, DetectionModel
-from utils.dataloaders import create_classification_dataloader
-from utils.general import (
- DATASETS_DIR,
- LOGGER,
- TQDM_BAR_FORMAT,
- WorkingDirectory,
- check_git_info,
- check_git_status,
- check_requirements,
- colorstr,
- download,
- increment_path,
- init_seeds,
- print_args,
- yaml_save,
-)
-from utils.loggers import GenericLogger
-from utils.plots import imshow_cls
-from utils.torch_utils import (
- ModelEMA,
- de_parallel,
- model_info,
- reshape_classifier_output,
- select_device,
- smart_DDP,
- smart_optimizer,
- smartCrossEntropyLoss,
- torch_distributed_zero_first,
-)
-
-LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv("RANK", -1))
-WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
-GIT_INFO = check_git_info()
-
-
-def train(opt, device):
- """Trains a YOLOv5 model, managing datasets, model optimization, logging, and saving checkpoints."""
- init_seeds(opt.seed + 1 + RANK, deterministic=True)
- save_dir, data, bs, epochs, nw, imgsz, pretrained = (
- opt.save_dir,
- Path(opt.data),
- opt.batch_size,
- opt.epochs,
- min(os.cpu_count() - 1, opt.workers),
- opt.imgsz,
- str(opt.pretrained).lower() == "true",
- )
- cuda = device.type != "cpu"
-
- # Directories
- wdir = save_dir / "weights"
- wdir.mkdir(parents=True, exist_ok=True) # make dir
- last, best = wdir / "last.pt", wdir / "best.pt"
-
- # Save run settings
- yaml_save(save_dir / "opt.yaml", vars(opt))
-
- # Logger
- logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
-
- # Download Dataset
- with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
- data_dir = data if data.is_dir() else (DATASETS_DIR / data)
- if not data_dir.is_dir():
- LOGGER.info(f"\nDataset not found ⚠️, missing path {data_dir}, attempting download...")
- t = time.time()
- if str(data) == "imagenet":
- subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], shell=True, check=True)
- else:
- url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{data}.zip"
- download(url, dir=data_dir.parent)
- s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
- LOGGER.info(s)
-
- # Dataloaders
- nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes
- trainloader = create_classification_dataloader(
- path=data_dir / "train",
- imgsz=imgsz,
- batch_size=bs // WORLD_SIZE,
- augment=True,
- cache=opt.cache,
- rank=LOCAL_RANK,
- workers=nw,
- )
-
- test_dir = data_dir / "test" if (data_dir / "test").exists() else data_dir / "val" # data/test or data/val
- if RANK in {-1, 0}:
- testloader = create_classification_dataloader(
- path=test_dir,
- imgsz=imgsz,
- batch_size=bs // WORLD_SIZE * 2,
- augment=False,
- cache=opt.cache,
- rank=-1,
- workers=nw,
- )
-
- # Model
- with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
- if Path(opt.model).is_file() or opt.model.endswith(".pt"):
- model = attempt_load(opt.model, device="cpu", fuse=False)
- elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
- model = torchvision.models.__dict__[opt.model](weights="IMAGENET1K_V1" if pretrained else None)
- else:
- m = hub.list("ultralytics/yolov5") # + hub.list('pytorch/vision') # models
- raise ModuleNotFoundError(f"--model {opt.model} not found. Available models are: \n" + "\n".join(m))
- if isinstance(model, DetectionModel):
- LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
- model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model
- reshape_classifier_output(model, nc) # update class count
- for m in model.modules():
- if not pretrained and hasattr(m, "reset_parameters"):
- m.reset_parameters()
- if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
- m.p = opt.dropout # set dropout
- for p in model.parameters():
- p.requires_grad = True # for training
- model = model.to(device)
-
- # Info
- if RANK in {-1, 0}:
- model.names = trainloader.dataset.classes # attach class names
- model.transforms = testloader.dataset.torch_transforms # attach inference transforms
- model_info(model)
- if opt.verbose:
- LOGGER.info(model)
- images, labels = next(iter(trainloader))
- file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / "train_images.jpg")
- logger.log_images(file, name="Train Examples")
- logger.log_graph(model, imgsz) # log model
-
- # Optimizer
- optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
-
- # Scheduler
- lrf = 0.01 # final lr (fraction of lr0)
-
- # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
- def lf(x):
- """Linear learning rate scheduler function, scaling learning rate from initial value to `lrf` over `epochs`."""
- return (1 - x / epochs) * (1 - lrf) + lrf # linear
-
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
- # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
- # final_div_factor=1 / 25 / lrf)
-
- # EMA
- ema = ModelEMA(model) if RANK in {-1, 0} else None
-
- # DDP mode
- if cuda and RANK != -1:
- model = smart_DDP(model)
-
- # Train
- t0 = time.time()
- criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function
- best_fitness = 0.0
- scaler = amp.GradScaler(enabled=cuda)
- val = test_dir.stem # 'val' or 'test'
- LOGGER.info(
- f"Image sizes {imgsz} train, {imgsz} test\n"
- f"Using {nw * WORLD_SIZE} dataloader workers\n"
- f"Logging results to {colorstr('bold', save_dir)}\n"
- f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n"
- f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}"
- )
- for epoch in range(epochs): # loop over the dataset multiple times
- tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness
- model.train()
- if RANK != -1:
- trainloader.sampler.set_epoch(epoch)
- pbar = enumerate(trainloader)
- if RANK in {-1, 0}:
- pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
- for i, (images, labels) in pbar: # progress bar
- images, labels = images.to(device, non_blocking=True), labels.to(device)
-
- # Forward
- with amp.autocast(enabled=cuda): # stability issues when enabled
- loss = criterion(model(images), labels)
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize
- scaler.unscale_(optimizer) # unscale gradients
- torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
- scaler.step(optimizer)
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
-
- if RANK in {-1, 0}:
- # Print
- tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses
- mem = "%.3gG" % (torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0) # (GB)
- pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + " " * 36
-
- # Test
- if i == len(pbar) - 1: # last batch
- top1, top5, vloss = validate.run(
- model=ema.ema, dataloader=testloader, criterion=criterion, pbar=pbar
- ) # test accuracy, loss
- fitness = top1 # define fitness as top1 accuracy
-
- # Scheduler
- scheduler.step()
-
- # Log metrics
- if RANK in {-1, 0}:
- # Best fitness
- if fitness > best_fitness:
- best_fitness = fitness
-
- # Log
- metrics = {
- "train/loss": tloss,
- f"{val}/loss": vloss,
- "metrics/accuracy_top1": top1,
- "metrics/accuracy_top5": top5,
- "lr/0": optimizer.param_groups[0]["lr"],
- } # learning rate
- logger.log_metrics(metrics, epoch)
-
- # Save model
- final_epoch = epoch + 1 == epochs
- if (not opt.nosave) or final_epoch:
- ckpt = {
- "epoch": epoch,
- "best_fitness": best_fitness,
- "model": deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(),
- "ema": None, # deepcopy(ema.ema).half(),
- "updates": ema.updates,
- "optimizer": None, # optimizer.state_dict(),
- "opt": vars(opt),
- "git": GIT_INFO, # {remote, branch, commit} if a git repo
- "date": datetime.now().isoformat(),
- }
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fitness:
- torch.save(ckpt, best)
- del ckpt
-
- # Train complete
- if RANK in {-1, 0} and final_epoch:
- LOGGER.info(
- f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)"
- f"\nResults saved to {colorstr('bold', save_dir)}"
- f"\nPredict: python classify/predict.py --weights {best} --source im.jpg"
- f"\nValidate: python classify/val.py --weights {best} --data {data_dir}"
- f"\nExport: python export.py --weights {best} --include onnx"
- f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
- f"\nVisualize: https://netron.app\n"
- )
-
- # Plot examples
- images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels
- pred = torch.max(ema.ema(images.to(device)), 1)[1]
- file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / "test_images.jpg")
-
- # Log results
- meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()}
- logger.log_images(file, name="Test Examples (true-predicted)", epoch=epoch)
- logger.log_model(best, epochs, metadata=meta)
-
-
-def parse_opt(known=False):
- """Parses command line arguments for YOLOv5 training including model path, dataset, epochs, and more, returning
- parsed arguments.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument("--model", type=str, default="yolov5s-cls.pt", help="initial weights path")
- parser.add_argument("--data", type=str, default="imagenette160", help="cifar10, cifar100, mnist, imagenet, ...")
- parser.add_argument("--epochs", type=int, default=10, help="total training epochs")
- parser.add_argument("--batch-size", type=int, default=64, help="total batch size for all GPUs")
- parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="train, val image size (pixels)")
- parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
- parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"')
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
- parser.add_argument("--project", default=ROOT / "runs/train-cls", help="save to project/name")
- parser.add_argument("--name", default="exp", help="save to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--pretrained", nargs="?", const=True, default=True, help="start from i.e. --pretrained False")
- parser.add_argument("--optimizer", choices=["SGD", "Adam", "AdamW", "RMSProp"], default="Adam", help="optimizer")
- parser.add_argument("--lr0", type=float, default=0.001, help="initial learning rate")
- parser.add_argument("--decay", type=float, default=5e-5, help="weight decay")
- parser.add_argument("--label-smoothing", type=float, default=0.1, help="Label smoothing epsilon")
- parser.add_argument("--cutoff", type=int, default=None, help="Model layer cutoff index for Classify() head")
- parser.add_argument("--dropout", type=float, default=None, help="Dropout (fraction)")
- parser.add_argument("--verbose", action="store_true", help="Verbose mode")
- parser.add_argument("--seed", type=int, default=0, help="Global training seed")
- parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
- return parser.parse_known_args()[0] if known else parser.parse_args()
-
-
-def main(opt):
- """Executes YOLOv5 training with given options, handling device setup and DDP mode; includes pre-training checks."""
- if RANK in {-1, 0}:
- print_args(vars(opt))
- check_git_status()
- check_requirements(ROOT / "requirements.txt")
-
- # DDP mode
- device = select_device(opt.device, batch_size=opt.batch_size)
- if LOCAL_RANK != -1:
- assert opt.batch_size != -1, "AutoBatch is coming soon for classification, please pass a valid --batch-size"
- assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
- assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
- torch.cuda.set_device(LOCAL_RANK)
- device = torch.device("cuda", LOCAL_RANK)
- dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
-
- # Parameters
- opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
-
- # Train
- train(opt, device)
-
-
-def run(**kwargs):
- """
- Executes YOLOv5 model training or inference with specified parameters, returning updated options.
-
- Example: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
- """
- opt = parse_opt(True)
- for k, v in kwargs.items():
- setattr(opt, k, v)
- main(opt)
- return opt
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
diff --git a/yolov5/classify/tutorial.ipynb b/yolov5/classify/tutorial.ipynb
deleted file mode 100644
index c547a29..0000000
--- a/yolov5/classify/tutorial.ipynb
+++ /dev/null
@@ -1,1488 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "t6MPjfT5NrKQ"
- },
- "source": [
-    "<!-- Ultralytics banner, badges, and links removed (HTML stripped during extraction) -->\n",
-    "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure. See GitHub for community support or contact us for professional support.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "7mGmQbAO5pQb"
- },
- "source": [
- "# Setup\n",
- "\n",
- "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "wbvMlHd_QwMG",
- "outputId": "0806e375-610d-4ec0-c867-763dbb518279"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
- ]
- }
- ],
- "source": [
- "!git clone https://github.com/ultralytics/yolov5 # clone\n",
- "%cd yolov5\n",
- "%pip install -qr requirements.txt # install\n",
- "\n",
- "import torch\n",
- "\n",
- "import utils\n",
- "\n",
- "display = utils.notebook_init() # checks"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4JnkELT0cIJg"
- },
- "source": [
- "# 1. Predict\n",
- "\n",
- "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n",
- "\n",
- "```shell\n",
- "python classify/predict.py --source 0 # webcam\n",
- " img.jpg # image \n",
- " vid.mp4 # video\n",
- " screen # screenshot\n",
- " path/ # directory\n",
- " 'path/*.jpg' # glob\n",
- " 'https://youtu.be/LNwODJXcvt4' # YouTube\n",
- " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "zR9ZbuQCH7FX",
- "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n",
- "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n",
- "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n",
- "\n",
- "Fusing layers... \n",
- "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n",
- "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n",
- "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n"
- ]
- }
- ],
- "source": [
- "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n",
- "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "hkAzDWJ7cWTr"
- },
- "source": [
-    "<!-- example classification results image removed (HTML stripped during extraction) -->\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0eq1SMWl6Sfn"
- },
- "source": [
- "# 2. Validate\n",
- "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "WQPtK1QYVaD_",
- "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n",
- "Resolving image-net.org (image-net.org)... 171.64.68.16\n",
- "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 6744924160 (6.3G) [application/x-tar]\n",
- "Saving to: ‘ILSVRC2012_img_val.tar’\n",
- "\n",
- "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n",
- "\n",
- "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n",
- "\n"
- ]
- }
- ],
- "source": [
- "# Download Imagenet val (6.3G, 50000 images)\n",
- "!bash data/scripts/get_imagenet.sh --val"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "X58w8JLpMnjH",
- "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n",
- "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "Fusing layers... \n",
- "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n",
- "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n",
- " Class Images top1_acc top5_acc\n",
- " all 50000 0.715 0.902\n",
- " tench 50 0.94 0.98\n",
- " goldfish 50 0.88 0.92\n",
- " great white shark 50 0.78 0.96\n",
- " tiger shark 50 0.68 0.96\n",
- " hammerhead shark 50 0.82 0.92\n",
- " electric ray 50 0.76 0.9\n",
- " stingray 50 0.7 0.9\n",
- " cock 50 0.78 0.92\n",
- " hen 50 0.84 0.96\n",
- " ostrich 50 0.98 1\n",
- " brambling 50 0.9 0.96\n",
- " goldfinch 50 0.92 0.98\n",
- " house finch 50 0.88 0.96\n",
- " junco 50 0.94 0.98\n",
- " indigo bunting 50 0.86 0.88\n",
- " American robin 50 0.9 0.96\n",
- " bulbul 50 0.84 0.96\n",
- " jay 50 0.9 0.96\n",
- " magpie 50 0.84 0.96\n",
- " chickadee 50 0.9 1\n",
- " American dipper 50 0.82 0.92\n",
- " kite 50 0.76 0.94\n",
- " bald eagle 50 0.92 1\n",
- " vulture 50 0.96 1\n",
- " great grey owl 50 0.94 0.98\n",
- " fire salamander 50 0.96 0.98\n",
- " smooth newt 50 0.58 0.94\n",
- " newt 50 0.74 0.9\n",
- " spotted salamander 50 0.86 0.94\n",
- " axolotl 50 0.86 0.96\n",
- " American bullfrog 50 0.78 0.92\n",
- " tree frog 50 0.84 0.96\n",
- " tailed frog 50 0.48 0.8\n",
- " loggerhead sea turtle 50 0.68 0.94\n",
- " leatherback sea turtle 50 0.5 0.8\n",
- " mud turtle 50 0.64 0.84\n",
- " terrapin 50 0.52 0.98\n",
- " box turtle 50 0.84 0.98\n",
- " banded gecko 50 0.7 0.88\n",
- " green iguana 50 0.76 0.94\n",
- " Carolina anole 50 0.58 0.96\n",
- "desert grassland whiptail lizard 50 0.82 0.94\n",
- " agama 50 0.74 0.92\n",
- " frilled-necked lizard 50 0.84 0.86\n",
- " alligator lizard 50 0.58 0.78\n",
- " Gila monster 50 0.72 0.8\n",
- " European green lizard 50 0.42 0.9\n",
- " chameleon 50 0.76 0.84\n",
- " Komodo dragon 50 0.86 0.96\n",
- " Nile crocodile 50 0.7 0.84\n",
- " American alligator 50 0.76 0.96\n",
- " triceratops 50 0.9 0.94\n",
- " worm snake 50 0.76 0.88\n",
- " ring-necked snake 50 0.8 0.92\n",
- " eastern hog-nosed snake 50 0.58 0.88\n",
- " smooth green snake 50 0.6 0.94\n",
- " kingsnake 50 0.82 0.9\n",
- " garter snake 50 0.88 0.94\n",
- " water snake 50 0.7 0.94\n",
- " vine snake 50 0.66 0.76\n",
- " night snake 50 0.34 0.82\n",
- " boa constrictor 50 0.8 0.96\n",
- " African rock python 50 0.48 0.76\n",
- " Indian cobra 50 0.82 0.94\n",
- " green mamba 50 0.54 0.86\n",
- " sea snake 50 0.62 0.9\n",
- " Saharan horned viper 50 0.56 0.86\n",
- "eastern diamondback rattlesnake 50 0.6 0.86\n",
- " sidewinder 50 0.28 0.86\n",
- " trilobite 50 0.98 0.98\n",
- " harvestman 50 0.86 0.94\n",
- " scorpion 50 0.86 0.94\n",
- " yellow garden spider 50 0.92 0.96\n",
- " barn spider 50 0.38 0.98\n",
- " European garden spider 50 0.62 0.98\n",
- " southern black widow 50 0.88 0.94\n",
- " tarantula 50 0.94 1\n",
- " wolf spider 50 0.82 0.92\n",
- " tick 50 0.74 0.84\n",
- " centipede 50 0.68 0.82\n",
- " black grouse 50 0.88 0.98\n",
- " ptarmigan 50 0.78 0.94\n",
- " ruffed grouse 50 0.88 1\n",
- " prairie grouse 50 0.92 1\n",
- " peacock 50 0.88 0.9\n",
- " quail 50 0.9 0.94\n",
- " partridge 50 0.74 0.96\n",
- " grey parrot 50 0.9 0.96\n",
- " macaw 50 0.88 0.98\n",
- "sulphur-crested cockatoo 50 0.86 0.92\n",
- " lorikeet 50 0.96 1\n",
- " coucal 50 0.82 0.88\n",
- " bee eater 50 0.96 0.98\n",
- " hornbill 50 0.9 0.96\n",
- " hummingbird 50 0.88 0.96\n",
- " jacamar 50 0.92 0.94\n",
- " toucan 50 0.84 0.94\n",
- " duck 50 0.76 0.94\n",
- " red-breasted merganser 50 0.86 0.96\n",
- " goose 50 0.74 0.96\n",
- " black swan 50 0.94 0.98\n",
- " tusker 50 0.54 0.92\n",
- " echidna 50 0.98 1\n",
- " platypus 50 0.72 0.84\n",
- " wallaby 50 0.78 0.88\n",
- " koala 50 0.84 0.92\n",
- " wombat 50 0.78 0.84\n",
- " jellyfish 50 0.88 0.96\n",
- " sea anemone 50 0.72 0.9\n",
- " brain coral 50 0.88 0.96\n",
- " flatworm 50 0.8 0.98\n",
- " nematode 50 0.86 0.9\n",
- " conch 50 0.74 0.88\n",
- " snail 50 0.78 0.88\n",
- " slug 50 0.74 0.82\n",
- " sea slug 50 0.88 0.98\n",
- " chiton 50 0.88 0.98\n",
- " chambered nautilus 50 0.88 0.92\n",
- " Dungeness crab 50 0.78 0.94\n",
- " rock crab 50 0.68 0.86\n",
- " fiddler crab 50 0.64 0.86\n",
- " red king crab 50 0.76 0.96\n",
- " American lobster 50 0.78 0.96\n",
- " spiny lobster 50 0.74 0.88\n",
- " crayfish 50 0.56 0.86\n",
- " hermit crab 50 0.78 0.96\n",
- " isopod 50 0.66 0.78\n",
- " white stork 50 0.88 0.96\n",
- " black stork 50 0.84 0.98\n",
- " spoonbill 50 0.96 1\n",
- " flamingo 50 0.94 1\n",
- " little blue heron 50 0.92 0.98\n",
- " great egret 50 0.9 0.96\n",
- " bittern 50 0.86 0.94\n",
- " crane (bird) 50 0.62 0.9\n",
- " limpkin 50 0.98 1\n",
- " common gallinule 50 0.92 0.96\n",
- " American coot 50 0.9 0.98\n",
- " bustard 50 0.92 0.96\n",
- " ruddy turnstone 50 0.94 1\n",
- " dunlin 50 0.86 0.94\n",
- " common redshank 50 0.9 0.96\n",
- " dowitcher 50 0.84 0.96\n",
- " oystercatcher 50 0.86 0.94\n",
- " pelican 50 0.92 0.96\n",
- " king penguin 50 0.88 0.96\n",
- " albatross 50 0.9 1\n",
- " grey whale 50 0.84 0.92\n",
- " killer whale 50 0.92 1\n",
- " dugong 50 0.84 0.96\n",
- " sea lion 50 0.82 0.92\n",
- " Chihuahua 50 0.66 0.84\n",
- " Japanese Chin 50 0.72 0.98\n",
- " Maltese 50 0.76 0.94\n",
- " Pekingese 50 0.84 0.94\n",
- " Shih Tzu 50 0.74 0.96\n",
- " King Charles Spaniel 50 0.88 0.98\n",
- " Papillon 50 0.86 0.94\n",
- " toy terrier 50 0.48 0.94\n",
- " Rhodesian Ridgeback 50 0.76 0.98\n",
- " Afghan Hound 50 0.84 1\n",
- " Basset Hound 50 0.8 0.92\n",
- " Beagle 50 0.82 0.96\n",
- " Bloodhound 50 0.48 0.72\n",
- " Bluetick Coonhound 50 0.86 0.94\n",
- " Black and Tan Coonhound 50 0.54 0.8\n",
- "Treeing Walker Coonhound 50 0.66 0.98\n",
- " English foxhound 50 0.32 0.84\n",
- " Redbone Coonhound 50 0.62 0.94\n",
- " borzoi 50 0.92 1\n",
- " Irish Wolfhound 50 0.48 0.88\n",
- " Italian Greyhound 50 0.76 0.98\n",
- " Whippet 50 0.74 0.92\n",
- " Ibizan Hound 50 0.6 0.86\n",
- " Norwegian Elkhound 50 0.88 0.98\n",
- " Otterhound 50 0.62 0.9\n",
- " Saluki 50 0.72 0.92\n",
- " Scottish Deerhound 50 0.86 0.98\n",
- " Weimaraner 50 0.88 0.94\n",
- "Staffordshire Bull Terrier 50 0.66 0.98\n",
- "American Staffordshire Terrier 50 0.64 0.92\n",
- " Bedlington Terrier 50 0.9 0.92\n",
- " Border Terrier 50 0.86 0.92\n",
- " Kerry Blue Terrier 50 0.78 0.98\n",
- " Irish Terrier 50 0.7 0.96\n",
- " Norfolk Terrier 50 0.68 0.9\n",
- " Norwich Terrier 50 0.72 1\n",
- " Yorkshire Terrier 50 0.66 0.9\n",
- " Wire Fox Terrier 50 0.64 0.98\n",
- " Lakeland Terrier 50 0.74 0.92\n",
- " Sealyham Terrier 50 0.76 0.9\n",
- " Airedale Terrier 50 0.82 0.92\n",
- " Cairn Terrier 50 0.76 0.9\n",
- " Australian Terrier 50 0.48 0.84\n",
- " Dandie Dinmont Terrier 50 0.82 0.92\n",
- " Boston Terrier 50 0.92 1\n",
- " Miniature Schnauzer 50 0.68 0.9\n",
- " Giant Schnauzer 50 0.72 0.98\n",
- " Standard Schnauzer 50 0.74 1\n",
- " Scottish Terrier 50 0.76 0.96\n",
- " Tibetan Terrier 50 0.48 1\n",
- "Australian Silky Terrier 50 0.66 0.96\n",
- "Soft-coated Wheaten Terrier 50 0.74 0.96\n",
- "West Highland White Terrier 50 0.88 0.96\n",
- " Lhasa Apso 50 0.68 0.96\n",
- " Flat-Coated Retriever 50 0.72 0.94\n",
- " Curly-coated Retriever 50 0.82 0.94\n",
- " Golden Retriever 50 0.86 0.94\n",
- " Labrador Retriever 50 0.82 0.94\n",
- "Chesapeake Bay Retriever 50 0.76 0.96\n",
- "German Shorthaired Pointer 50 0.8 0.96\n",
- " Vizsla 50 0.68 0.96\n",
- " English Setter 50 0.7 1\n",
- " Irish Setter 50 0.8 0.9\n",
- " Gordon Setter 50 0.84 0.92\n",
- " Brittany 50 0.84 0.96\n",
- " Clumber Spaniel 50 0.92 0.96\n",
- "English Springer Spaniel 50 0.88 1\n",
- " Welsh Springer Spaniel 50 0.92 1\n",
- " Cocker Spaniels 50 0.7 0.94\n",
- " Sussex Spaniel 50 0.72 0.92\n",
- " Irish Water Spaniel 50 0.88 0.98\n",
- " Kuvasz 50 0.66 0.9\n",
- " Schipperke 50 0.9 0.98\n",
- " Groenendael 50 0.8 0.94\n",
- " Malinois 50 0.86 0.98\n",
- " Briard 50 0.52 0.8\n",
- " Australian Kelpie 50 0.6 0.88\n",
- " Komondor 50 0.88 0.94\n",
- " Old English Sheepdog 50 0.94 0.98\n",
- " Shetland Sheepdog 50 0.74 0.9\n",
- " collie 50 0.6 0.96\n",
- " Border Collie 50 0.74 0.96\n",
- " Bouvier des Flandres 50 0.78 0.94\n",
- " Rottweiler 50 0.88 0.96\n",
- " German Shepherd Dog 50 0.8 0.98\n",
- " Dobermann 50 0.68 0.96\n",
- " Miniature Pinscher 50 0.76 0.88\n",
- "Greater Swiss Mountain Dog 50 0.68 0.94\n",
- " Bernese Mountain Dog 50 0.96 1\n",
- " Appenzeller Sennenhund 50 0.22 1\n",
- " Entlebucher Sennenhund 50 0.64 0.98\n",
- " Boxer 50 0.7 0.92\n",
- " Bullmastiff 50 0.78 0.98\n",
- " Tibetan Mastiff 50 0.88 0.96\n",
- " French Bulldog 50 0.84 0.94\n",
- " Great Dane 50 0.54 0.9\n",
- " St. Bernard 50 0.92 1\n",
- " husky 50 0.46 0.98\n",
- " Alaskan Malamute 50 0.76 0.96\n",
- " Siberian Husky 50 0.46 0.98\n",
- " Dalmatian 50 0.94 0.98\n",
- " Affenpinscher 50 0.78 0.9\n",
- " Basenji 50 0.92 0.94\n",
- " pug 50 0.94 0.98\n",
- " Leonberger 50 1 1\n",
- " Newfoundland 50 0.78 0.96\n",
- " Pyrenean Mountain Dog 50 0.78 0.96\n",
- " Samoyed 50 0.96 1\n",
- " Pomeranian 50 0.98 1\n",
- " Chow Chow 50 0.9 0.96\n",
- " Keeshond 50 0.88 0.94\n",
- " Griffon Bruxellois 50 0.84 0.98\n",
- " Pembroke Welsh Corgi 50 0.82 0.94\n",
- " Cardigan Welsh Corgi 50 0.66 0.98\n",
- " Toy Poodle 50 0.52 0.88\n",
- " Miniature Poodle 50 0.52 0.92\n",
- " Standard Poodle 50 0.8 1\n",
- " Mexican hairless dog 50 0.88 0.98\n",
- " grey wolf 50 0.82 0.92\n",
- " Alaskan tundra wolf 50 0.78 0.98\n",
- " red wolf 50 0.48 0.9\n",
- " coyote 50 0.64 0.86\n",
- " dingo 50 0.76 0.88\n",
- " dhole 50 0.9 0.98\n",
- " African wild dog 50 0.98 1\n",
- " hyena 50 0.88 0.96\n",
- " red fox 50 0.54 0.92\n",
- " kit fox 50 0.72 0.98\n",
- " Arctic fox 50 0.94 1\n",
- " grey fox 50 0.7 0.94\n",
- " tabby cat 50 0.54 0.92\n",
- " tiger cat 50 0.22 0.94\n",
- " Persian cat 50 0.9 0.98\n",
- " Siamese cat 50 0.96 1\n",
- " Egyptian Mau 50 0.54 0.8\n",
- " cougar 50 0.9 1\n",
- " lynx 50 0.72 0.88\n",
- " leopard 50 0.78 0.98\n",
- " snow leopard 50 0.9 0.98\n",
- " jaguar 50 0.7 0.94\n",
- " lion 50 0.9 0.98\n",
- " tiger 50 0.92 0.98\n",
- " cheetah 50 0.94 0.98\n",
- " brown bear 50 0.94 0.98\n",
- " American black bear 50 0.8 1\n",
- " polar bear 50 0.84 0.96\n",
- " sloth bear 50 0.72 0.92\n",
- " mongoose 50 0.7 0.92\n",
- " meerkat 50 0.82 0.92\n",
- " tiger beetle 50 0.92 0.94\n",
- " ladybug 50 0.86 0.94\n",
- " ground beetle 50 0.64 0.94\n",
- " longhorn beetle 50 0.62 0.88\n",
- " leaf beetle 50 0.64 0.98\n",
- " dung beetle 50 0.86 0.98\n",
- " rhinoceros beetle 50 0.86 0.94\n",
- " weevil 50 0.9 1\n",
- " fly 50 0.78 0.94\n",
- " bee 50 0.68 0.94\n",
- " ant 50 0.68 0.78\n",
- " grasshopper 50 0.5 0.92\n",
- " cricket 50 0.64 0.92\n",
- " stick insect 50 0.64 0.92\n",
- " cockroach 50 0.72 0.8\n",
- " mantis 50 0.64 0.86\n",
- " cicada 50 0.9 0.96\n",
- " leafhopper 50 0.88 0.94\n",
- " lacewing 50 0.78 0.92\n",
- " dragonfly 50 0.82 0.98\n",
- " damselfly 50 0.82 1\n",
- " red admiral 50 0.94 0.96\n",
- " ringlet 50 0.86 0.98\n",
- " monarch butterfly 50 0.9 0.92\n",
- " small white 50 0.9 1\n",
- " sulphur butterfly 50 0.92 1\n",
- "gossamer-winged butterfly 50 0.88 1\n",
- " starfish 50 0.88 0.92\n",
- " sea urchin 50 0.84 0.94\n",
- " sea cucumber 50 0.66 0.84\n",
- " cottontail rabbit 50 0.72 0.94\n",
- " hare 50 0.84 0.96\n",
- " Angora rabbit 50 0.94 0.98\n",
- " hamster 50 0.96 1\n",
- " porcupine 50 0.88 0.98\n",
- " fox squirrel 50 0.76 0.94\n",
- " marmot 50 0.92 0.96\n",
- " beaver 50 0.78 0.94\n",
- " guinea pig 50 0.78 0.94\n",
- " common sorrel 50 0.96 0.98\n",
- " zebra 50 0.94 0.96\n",
- " pig 50 0.5 0.76\n",
- " wild boar 50 0.84 0.96\n",
- " warthog 50 0.84 0.96\n",
- " hippopotamus 50 0.88 0.96\n",
- " ox 50 0.48 0.94\n",
- " water buffalo 50 0.78 0.94\n",
- " bison 50 0.88 0.96\n",
- " ram 50 0.58 0.92\n",
- " bighorn sheep 50 0.66 1\n",
- " Alpine ibex 50 0.92 0.98\n",
- " hartebeest 50 0.94 1\n",
- " impala 50 0.82 0.96\n",
- " gazelle 50 0.7 0.96\n",
- " dromedary 50 0.9 1\n",
- " llama 50 0.82 0.94\n",
- " weasel 50 0.44 0.92\n",
- " mink 50 0.78 0.96\n",
- " European polecat 50 0.46 0.9\n",
- " black-footed ferret 50 0.68 0.96\n",
- " otter 50 0.66 0.88\n",
- " skunk 50 0.96 0.96\n",
- " badger 50 0.86 0.92\n",
- " armadillo 50 0.88 0.9\n",
- " three-toed sloth 50 0.96 1\n",
- " orangutan 50 0.78 0.92\n",
- " gorilla 50 0.82 0.94\n",
- " chimpanzee 50 0.84 0.94\n",
- " gibbon 50 0.76 0.86\n",
- " siamang 50 0.68 0.94\n",
- " guenon 50 0.8 0.94\n",
- " patas monkey 50 0.62 0.82\n",
- " baboon 50 0.9 0.98\n",
- " macaque 50 0.8 0.86\n",
- " langur 50 0.6 0.82\n",
- " black-and-white colobus 50 0.86 0.9\n",
- " proboscis monkey 50 1 1\n",
- " marmoset 50 0.74 0.98\n",
- " white-headed capuchin 50 0.72 0.9\n",
- " howler monkey 50 0.86 0.94\n",
- " titi 50 0.5 0.9\n",
- "Geoffroy's spider monkey 50 0.42 0.8\n",
- " common squirrel monkey 50 0.76 0.92\n",
- " ring-tailed lemur 50 0.72 0.94\n",
- " indri 50 0.9 0.96\n",
- " Asian elephant 50 0.58 0.92\n",
- " African bush elephant 50 0.7 0.98\n",
- " red panda 50 0.94 0.94\n",
- " giant panda 50 0.94 0.98\n",
- " snoek 50 0.74 0.9\n",
- " eel 50 0.6 0.84\n",
- " coho salmon 50 0.84 0.96\n",
- " rock beauty 50 0.88 0.98\n",
- " clownfish 50 0.78 0.98\n",
- " sturgeon 50 0.68 0.94\n",
- " garfish 50 0.62 0.8\n",
- " lionfish 50 0.96 0.96\n",
- " pufferfish 50 0.88 0.96\n",
- " abacus 50 0.74 0.88\n",
- " abaya 50 0.84 0.92\n",
- " academic gown 50 0.42 0.86\n",
- " accordion 50 0.8 0.9\n",
- " acoustic guitar 50 0.5 0.76\n",
- " aircraft carrier 50 0.8 0.96\n",
- " airliner 50 0.92 1\n",
- " airship 50 0.76 0.82\n",
- " altar 50 0.64 0.98\n",
- " ambulance 50 0.88 0.98\n",
- " amphibious vehicle 50 0.64 0.94\n",
- " analog clock 50 0.52 0.92\n",
- " apiary 50 0.82 0.96\n",
- " apron 50 0.7 0.84\n",
- " waste container 50 0.4 0.8\n",
- " assault rifle 50 0.42 0.84\n",
- " backpack 50 0.34 0.64\n",
- " bakery 50 0.4 0.68\n",
- " balance beam 50 0.8 0.98\n",
- " balloon 50 0.86 0.96\n",
- " ballpoint pen 50 0.52 0.96\n",
- " Band-Aid 50 0.7 0.9\n",
- " banjo 50 0.84 1\n",
- " baluster 50 0.68 0.94\n",
- " barbell 50 0.56 0.9\n",
- " barber chair 50 0.7 0.92\n",
- " barbershop 50 0.54 0.86\n",
- " barn 50 0.96 0.96\n",
- " barometer 50 0.84 0.98\n",
- " barrel 50 0.56 0.88\n",
- " wheelbarrow 50 0.66 0.88\n",
- " baseball 50 0.74 0.98\n",
- " basketball 50 0.88 0.98\n",
- " bassinet 50 0.66 0.92\n",
- " bassoon 50 0.74 0.98\n",
- " swimming cap 50 0.62 0.88\n",
- " bath towel 50 0.54 0.78\n",
- " bathtub 50 0.4 0.88\n",
- " station wagon 50 0.66 0.84\n",
- " lighthouse 50 0.78 0.94\n",
- " beaker 50 0.52 0.68\n",
- " military cap 50 0.84 0.96\n",
- " beer bottle 50 0.66 0.88\n",
- " beer glass 50 0.6 0.84\n",
- " bell-cot 50 0.56 0.96\n",
- " bib 50 0.58 0.82\n",
- " tandem bicycle 50 0.86 0.96\n",
- " bikini 50 0.56 0.88\n",
- " ring binder 50 0.64 0.84\n",
- " binoculars 50 0.54 0.78\n",
- " birdhouse 50 0.86 0.94\n",
- " boathouse 50 0.74 0.92\n",
- " bobsleigh 50 0.92 0.96\n",
- " bolo tie 50 0.8 0.94\n",
- " poke bonnet 50 0.64 0.86\n",
- " bookcase 50 0.66 0.92\n",
- " bookstore 50 0.62 0.88\n",
- " bottle cap 50 0.58 0.7\n",
- " bow 50 0.72 0.86\n",
- " bow tie 50 0.7 0.9\n",
- " brass 50 0.92 0.96\n",
- " bra 50 0.5 0.7\n",
- " breakwater 50 0.62 0.86\n",
- " breastplate 50 0.4 0.9\n",
- " broom 50 0.6 0.86\n",
- " bucket 50 0.66 0.8\n",
- " buckle 50 0.5 0.68\n",
- " bulletproof vest 50 0.5 0.78\n",
- " high-speed train 50 0.94 0.96\n",
- " butcher shop 50 0.74 0.94\n",
- " taxicab 50 0.64 0.86\n",
- " cauldron 50 0.44 0.66\n",
- " candle 50 0.48 0.74\n",
- " cannon 50 0.88 0.94\n",
- " canoe 50 0.94 1\n",
- " can opener 50 0.66 0.86\n",
- " cardigan 50 0.68 0.8\n",
- " car mirror 50 0.94 0.96\n",
- " carousel 50 0.94 0.98\n",
- " tool kit 50 0.56 0.78\n",
- " carton 50 0.42 0.7\n",
- " car wheel 50 0.38 0.74\n",
- "automated teller machine 50 0.76 0.94\n",
- " cassette 50 0.52 0.8\n",
- " cassette player 50 0.28 0.9\n",
- " castle 50 0.78 0.88\n",
- " catamaran 50 0.78 1\n",
- " CD player 50 0.52 0.82\n",
- " cello 50 0.82 1\n",
- " mobile phone 50 0.68 0.86\n",
- " chain 50 0.38 0.66\n",
- " chain-link fence 50 0.7 0.84\n",
- " chain mail 50 0.64 0.9\n",
- " chainsaw 50 0.84 0.92\n",
- " chest 50 0.68 0.92\n",
- " chiffonier 50 0.26 0.64\n",
- " chime 50 0.62 0.84\n",
- " china cabinet 50 0.82 0.96\n",
- " Christmas stocking 50 0.92 0.94\n",
- " church 50 0.62 0.9\n",
- " movie theater 50 0.58 0.88\n",
- " cleaver 50 0.32 0.62\n",
- " cliff dwelling 50 0.88 1\n",
- " cloak 50 0.32 0.64\n",
- " clogs 50 0.58 0.88\n",
- " cocktail shaker 50 0.62 0.7\n",
- " coffee mug 50 0.44 0.72\n",
- " coffeemaker 50 0.64 0.92\n",
- " coil 50 0.66 0.84\n",
- " combination lock 50 0.64 0.84\n",
- " computer keyboard 50 0.7 0.82\n",
- " confectionery store 50 0.54 0.86\n",
- " container ship 50 0.82 0.98\n",
- " convertible 50 0.78 0.98\n",
- " corkscrew 50 0.82 0.92\n",
- " cornet 50 0.46 0.88\n",
- " cowboy boot 50 0.64 0.8\n",
- " cowboy hat 50 0.64 0.82\n",
- " cradle 50 0.38 0.8\n",
- " crane (machine) 50 0.78 0.94\n",
- " crash helmet 50 0.92 0.96\n",
- " crate 50 0.52 0.82\n",
- " infant bed 50 0.74 1\n",
- " Crock Pot 50 0.78 0.9\n",
- " croquet ball 50 0.9 0.96\n",
- " crutch 50 0.46 0.7\n",
- " cuirass 50 0.54 0.86\n",
- " dam 50 0.74 0.92\n",
- " desk 50 0.6 0.86\n",
- " desktop computer 50 0.54 0.94\n",
- " rotary dial telephone 50 0.88 0.94\n",
- " diaper 50 0.68 0.84\n",
- " digital clock 50 0.54 0.76\n",
- " digital watch 50 0.58 0.86\n",
- " dining table 50 0.76 0.9\n",
- " dishcloth 50 0.94 1\n",
- " dishwasher 50 0.44 0.78\n",
- " disc brake 50 0.98 1\n",
- " dock 50 0.54 0.94\n",
- " dog sled 50 0.84 1\n",
- " dome 50 0.72 0.92\n",
- " doormat 50 0.56 0.82\n",
- " drilling rig 50 0.84 0.96\n",
- " drum 50 0.38 0.68\n",
- " drumstick 50 0.56 0.72\n",
- " dumbbell 50 0.62 0.9\n",
- " Dutch oven 50 0.7 0.84\n",
- " electric fan 50 0.82 0.86\n",
- " electric guitar 50 0.62 0.84\n",
- " electric locomotive 50 0.92 0.98\n",
- " entertainment center 50 0.9 0.98\n",
- " envelope 50 0.44 0.86\n",
- " espresso machine 50 0.72 0.94\n",
- " face powder 50 0.7 0.92\n",
- " feather boa 50 0.7 0.84\n",
- " filing cabinet 50 0.88 0.98\n",
- " fireboat 50 0.94 0.98\n",
- " fire engine 50 0.84 0.9\n",
- " fire screen sheet 50 0.62 0.76\n",
- " flagpole 50 0.74 0.88\n",
- " flute 50 0.36 0.72\n",
- " folding chair 50 0.62 0.84\n",
- " football helmet 50 0.86 0.94\n",
- " forklift 50 0.8 0.92\n",
- " fountain 50 0.84 0.94\n",
- " fountain pen 50 0.76 0.92\n",
- " four-poster bed 50 0.78 0.94\n",
- " freight car 50 0.96 1\n",
- " French horn 50 0.76 0.92\n",
- " frying pan 50 0.36 0.78\n",
- " fur coat 50 0.84 0.96\n",
- " garbage truck 50 0.9 0.98\n",
- " gas mask 50 0.84 0.92\n",
- " gas pump 50 0.9 0.98\n",
- " goblet 50 0.68 0.82\n",
- " go-kart 50 0.9 1\n",
- " golf ball 50 0.84 0.9\n",
- " golf cart 50 0.78 0.86\n",
- " gondola 50 0.98 0.98\n",
- " gong 50 0.74 0.92\n",
- " gown 50 0.62 0.96\n",
- " grand piano 50 0.7 0.96\n",
- " greenhouse 50 0.8 0.98\n",
- " grille 50 0.72 0.9\n",
- " grocery store 50 0.66 0.94\n",
- " guillotine 50 0.86 0.92\n",
- " barrette 50 0.52 0.66\n",
- " hair spray 50 0.5 0.74\n",
- " half-track 50 0.78 0.9\n",
- " hammer 50 0.56 0.76\n",
- " hamper 50 0.64 0.84\n",
- " hair dryer 50 0.56 0.74\n",
- " hand-held computer 50 0.42 0.86\n",
- " handkerchief 50 0.78 0.94\n",
- " hard disk drive 50 0.76 0.84\n",
- " harmonica 50 0.7 0.88\n",
- " harp 50 0.88 0.96\n",
- " harvester 50 0.78 1\n",
- " hatchet 50 0.54 0.74\n",
- " holster 50 0.66 0.84\n",
- " home theater 50 0.64 0.94\n",
- " honeycomb 50 0.56 0.88\n",
- " hook 50 0.3 0.6\n",
- " hoop skirt 50 0.64 0.86\n",
- " horizontal bar 50 0.68 0.98\n",
- " horse-drawn vehicle 50 0.88 0.94\n",
- " hourglass 50 0.88 0.96\n",
- " iPod 50 0.76 0.94\n",
- " clothes iron 50 0.82 0.88\n",
- " jack-o'-lantern 50 0.98 0.98\n",
- " jeans 50 0.68 0.84\n",
- " jeep 50 0.72 0.9\n",
- " T-shirt 50 0.72 0.96\n",
- " jigsaw puzzle 50 0.84 0.94\n",
- " pulled rickshaw 50 0.86 0.94\n",
- " joystick 50 0.8 0.9\n",
- " kimono 50 0.84 0.96\n",
- " knee pad 50 0.62 0.88\n",
- " knot 50 0.66 0.8\n",
- " lab coat 50 0.8 0.96\n",
- " ladle 50 0.36 0.64\n",
- " lampshade 50 0.48 0.84\n",
- " laptop computer 50 0.26 0.88\n",
- " lawn mower 50 0.78 0.96\n",
- " lens cap 50 0.46 0.72\n",
- " paper knife 50 0.26 0.5\n",
- " library 50 0.54 0.9\n",
- " lifeboat 50 0.92 0.98\n",
- " lighter 50 0.56 0.78\n",
- " limousine 50 0.76 0.92\n",
- " ocean liner 50 0.88 0.94\n",
- " lipstick 50 0.74 0.9\n",
- " slip-on shoe 50 0.74 0.92\n",
- " lotion 50 0.5 0.86\n",
- " speaker 50 0.52 0.68\n",
- " loupe 50 0.32 0.52\n",
- " sawmill 50 0.72 0.9\n",
- " magnetic compass 50 0.52 0.82\n",
- " mail bag 50 0.68 0.92\n",
- " mailbox 50 0.82 0.92\n",
- " tights 50 0.22 0.94\n",
- " tank suit 50 0.24 0.9\n",
- " manhole cover 50 0.96 0.98\n",
- " maraca 50 0.74 0.9\n",
- " marimba 50 0.84 0.94\n",
- " mask 50 0.44 0.82\n",
- " match 50 0.66 0.9\n",
- " maypole 50 0.96 1\n",
- " maze 50 0.8 0.96\n",
- " measuring cup 50 0.54 0.76\n",
- " medicine chest 50 0.6 0.84\n",
- " megalith 50 0.8 0.92\n",
- " microphone 50 0.52 0.7\n",
- " microwave oven 50 0.48 0.72\n",
- " military uniform 50 0.62 0.84\n",
- " milk can 50 0.68 0.82\n",
- " minibus 50 0.7 1\n",
- " miniskirt 50 0.46 0.76\n",
- " minivan 50 0.38 0.8\n",
- " missile 50 0.4 0.84\n",
- " mitten 50 0.76 0.88\n",
- " mixing bowl 50 0.8 0.92\n",
- " mobile home 50 0.54 0.78\n",
- " Model T 50 0.92 0.96\n",
- " modem 50 0.58 0.86\n",
- " monastery 50 0.44 0.9\n",
- " monitor 50 0.4 0.86\n",
- " moped 50 0.56 0.94\n",
- " mortar 50 0.68 0.94\n",
- " square academic cap 50 0.5 0.84\n",
- " mosque 50 0.9 1\n",
- " mosquito net 50 0.9 0.98\n",
- " scooter 50 0.9 0.98\n",
- " mountain bike 50 0.78 0.96\n",
- " tent 50 0.88 0.96\n",
- " computer mouse 50 0.42 0.82\n",
- " mousetrap 50 0.76 0.88\n",
- " moving van 50 0.4 0.72\n",
- " muzzle 50 0.5 0.72\n",
- " nail 50 0.68 0.74\n",
- " neck brace 50 0.56 0.68\n",
- " necklace 50 0.86 1\n",
- " nipple 50 0.7 0.88\n",
- " notebook computer 50 0.34 0.84\n",
- " obelisk 50 0.8 0.92\n",
- " oboe 50 0.6 0.84\n",
- " ocarina 50 0.8 0.86\n",
- " odometer 50 0.96 1\n",
- " oil filter 50 0.58 0.82\n",
- " organ 50 0.82 0.9\n",
- " oscilloscope 50 0.9 0.96\n",
- " overskirt 50 0.2 0.7\n",
- " bullock cart 50 0.7 0.94\n",
- " oxygen mask 50 0.46 0.84\n",
- " packet 50 0.5 0.78\n",
- " paddle 50 0.56 0.94\n",
- " paddle wheel 50 0.86 0.96\n",
- " padlock 50 0.74 0.78\n",
- " paintbrush 50 0.62 0.8\n",
- " pajamas 50 0.56 0.92\n",
- " palace 50 0.64 0.96\n",
- " pan flute 50 0.84 0.86\n",
- " paper towel 50 0.66 0.84\n",
- " parachute 50 0.92 0.94\n",
- " parallel bars 50 0.62 0.96\n",
- " park bench 50 0.74 0.9\n",
- " parking meter 50 0.84 0.92\n",
- " passenger car 50 0.5 0.82\n",
- " patio 50 0.58 0.84\n",
- " payphone 50 0.74 0.92\n",
- " pedestal 50 0.52 0.9\n",
- " pencil case 50 0.64 0.92\n",
- " pencil sharpener 50 0.52 0.78\n",
- " perfume 50 0.7 0.9\n",
- " Petri dish 50 0.6 0.8\n",
- " photocopier 50 0.88 0.98\n",
- " plectrum 50 0.7 0.84\n",
- " Pickelhaube 50 0.72 0.86\n",
- " picket fence 50 0.84 0.94\n",
- " pickup truck 50 0.64 0.92\n",
- " pier 50 0.52 0.82\n",
- " piggy bank 50 0.82 0.94\n",
- " pill bottle 50 0.76 0.86\n",
- " pillow 50 0.76 0.9\n",
- " ping-pong ball 50 0.84 0.88\n",
- " pinwheel 50 0.76 0.88\n",
- " pirate ship 50 0.76 0.94\n",
- " pitcher 50 0.46 0.84\n",
- " hand plane 50 0.84 0.94\n",
- " planetarium 50 0.88 0.98\n",
- " plastic bag 50 0.36 0.62\n",
- " plate rack 50 0.52 0.78\n",
- " plow 50 0.78 0.88\n",
- " plunger 50 0.42 0.7\n",
- " Polaroid camera 50 0.84 0.92\n",
- " pole 50 0.38 0.74\n",
- " police van 50 0.76 0.94\n",
- " poncho 50 0.58 0.86\n",
- " billiard table 50 0.8 0.88\n",
- " soda bottle 50 0.56 0.94\n",
- " pot 50 0.78 0.92\n",
- " potter's wheel 50 0.9 0.94\n",
- " power drill 50 0.42 0.72\n",
- " prayer rug 50 0.7 0.86\n",
- " printer 50 0.54 0.86\n",
- " prison 50 0.7 0.9\n",
- " projectile 50 0.28 0.9\n",
- " projector 50 0.62 0.84\n",
- " hockey puck 50 0.92 0.96\n",
- " punching bag 50 0.6 0.68\n",
- " purse 50 0.42 0.78\n",
- " quill 50 0.68 0.84\n",
- " quilt 50 0.64 0.9\n",
- " race car 50 0.72 0.92\n",
- " racket 50 0.72 0.9\n",
- " radiator 50 0.66 0.76\n",
- " radio 50 0.64 0.92\n",
- " radio telescope 50 0.9 0.96\n",
- " rain barrel 50 0.8 0.98\n",
- " recreational vehicle 50 0.84 0.94\n",
- " reel 50 0.72 0.82\n",
- " reflex camera 50 0.72 0.92\n",
- " refrigerator 50 0.7 0.9\n",
- " remote control 50 0.7 0.88\n",
- " restaurant 50 0.5 0.66\n",
- " revolver 50 0.82 1\n",
- " rifle 50 0.38 0.7\n",
- " rocking chair 50 0.62 0.84\n",
- " rotisserie 50 0.88 0.92\n",
- " eraser 50 0.54 0.76\n",
- " rugby ball 50 0.86 0.94\n",
- " ruler 50 0.68 0.86\n",
- " running shoe 50 0.78 0.94\n",
- " safe 50 0.82 0.92\n",
- " safety pin 50 0.4 0.62\n",
- " salt shaker 50 0.66 0.9\n",
- " sandal 50 0.66 0.86\n",
- " sarong 50 0.64 0.86\n",
- " saxophone 50 0.66 0.88\n",
- " scabbard 50 0.76 0.92\n",
- " weighing scale 50 0.58 0.78\n",
- " school bus 50 0.92 1\n",
- " schooner 50 0.84 1\n",
- " scoreboard 50 0.9 0.96\n",
- " CRT screen 50 0.14 0.7\n",
- " screw 50 0.9 0.98\n",
- " screwdriver 50 0.3 0.58\n",
- " seat belt 50 0.88 0.94\n",
- " sewing machine 50 0.76 0.9\n",
- " shield 50 0.56 0.82\n",
- " shoe store 50 0.78 0.96\n",
- " shoji 50 0.8 0.92\n",
- " shopping basket 50 0.52 0.88\n",
- " shopping cart 50 0.76 0.92\n",
- " shovel 50 0.62 0.84\n",
- " shower cap 50 0.7 0.84\n",
- " shower curtain 50 0.64 0.82\n",
- " ski 50 0.74 0.92\n",
- " ski mask 50 0.72 0.88\n",
- " sleeping bag 50 0.68 0.8\n",
- " slide rule 50 0.72 0.88\n",
- " sliding door 50 0.44 0.78\n",
- " slot machine 50 0.94 0.98\n",
- " snorkel 50 0.86 0.98\n",
- " snowmobile 50 0.88 1\n",
- " snowplow 50 0.84 0.98\n",
- " soap dispenser 50 0.56 0.86\n",
- " soccer ball 50 0.86 0.96\n",
- " sock 50 0.62 0.76\n",
- " solar thermal collector 50 0.72 0.96\n",
- " sombrero 50 0.6 0.84\n",
- " soup bowl 50 0.56 0.94\n",
- " space bar 50 0.34 0.88\n",
- " space heater 50 0.52 0.74\n",
- " space shuttle 50 0.82 0.96\n",
- " spatula 50 0.3 0.6\n",
- " motorboat 50 0.86 1\n",
- " spider web 50 0.7 0.9\n",
- " spindle 50 0.86 0.98\n",
- " sports car 50 0.6 0.94\n",
- " spotlight 50 0.26 0.6\n",
- " stage 50 0.68 0.86\n",
- " steam locomotive 50 0.94 1\n",
- " through arch bridge 50 0.84 0.96\n",
- " steel drum 50 0.82 0.9\n",
- " stethoscope 50 0.6 0.82\n",
- " scarf 50 0.5 0.92\n",
- " stone wall 50 0.76 0.9\n",
- " stopwatch 50 0.58 0.9\n",
- " stove 50 0.46 0.74\n",
- " strainer 50 0.64 0.84\n",
- " tram 50 0.88 0.96\n",
- " stretcher 50 0.6 0.8\n",
- " couch 50 0.8 0.96\n",
- " stupa 50 0.88 0.88\n",
- " submarine 50 0.72 0.92\n",
- " suit 50 0.4 0.78\n",
- " sundial 50 0.58 0.74\n",
- " sunglass 50 0.14 0.58\n",
- " sunglasses 50 0.28 0.58\n",
- " sunscreen 50 0.32 0.7\n",
- " suspension bridge 50 0.6 0.94\n",
- " mop 50 0.74 0.92\n",
- " sweatshirt 50 0.28 0.66\n",
- " swimsuit 50 0.52 0.82\n",
- " swing 50 0.76 0.84\n",
- " switch 50 0.56 0.76\n",
- " syringe 50 0.62 0.82\n",
- " table lamp 50 0.6 0.88\n",
- " tank 50 0.8 0.96\n",
- " tape player 50 0.46 0.76\n",
- " teapot 50 0.84 1\n",
- " teddy bear 50 0.82 0.94\n",
- " television 50 0.6 0.9\n",
- " tennis ball 50 0.7 0.94\n",
- " thatched roof 50 0.88 0.9\n",
- " front curtain 50 0.8 0.92\n",
- " thimble 50 0.6 0.8\n",
- " threshing machine 50 0.56 0.88\n",
- " throne 50 0.72 0.82\n",
- " tile roof 50 0.72 0.94\n",
- " toaster 50 0.66 0.84\n",
- " tobacco shop 50 0.42 0.7\n",
- " toilet seat 50 0.62 0.88\n",
- " torch 50 0.64 0.84\n",
- " totem pole 50 0.92 0.98\n",
- " tow truck 50 0.62 0.88\n",
- " toy store 50 0.6 0.94\n",
- " tractor 50 0.76 0.98\n",
- " semi-trailer truck 50 0.78 0.92\n",
- " tray 50 0.46 0.64\n",
- " trench coat 50 0.54 0.72\n",
- " tricycle 50 0.72 0.94\n",
- " trimaran 50 0.7 0.98\n",
- " tripod 50 0.58 0.86\n",
- " triumphal arch 50 0.92 0.98\n",
- " trolleybus 50 0.9 1\n",
- " trombone 50 0.54 0.88\n",
- " tub 50 0.24 0.82\n",
- " turnstile 50 0.84 0.94\n",
- " typewriter keyboard 50 0.68 0.98\n",
- " umbrella 50 0.52 0.7\n",
- " unicycle 50 0.74 0.96\n",
- " upright piano 50 0.76 0.9\n",
- " vacuum cleaner 50 0.62 0.9\n",
- " vase 50 0.5 0.78\n",
- " vault 50 0.76 0.92\n",
- " velvet 50 0.2 0.42\n",
- " vending machine 50 0.9 1\n",
- " vestment 50 0.54 0.82\n",
- " viaduct 50 0.78 0.86\n",
- " violin 50 0.68 0.78\n",
- " volleyball 50 0.86 1\n",
- " waffle iron 50 0.72 0.88\n",
- " wall clock 50 0.54 0.88\n",
- " wallet 50 0.52 0.9\n",
- " wardrobe 50 0.68 0.88\n",
- " military aircraft 50 0.9 0.98\n",
- " sink 50 0.72 0.96\n",
- " washing machine 50 0.78 0.94\n",
- " water bottle 50 0.54 0.74\n",
- " water jug 50 0.22 0.74\n",
- " water tower 50 0.9 0.96\n",
- " whiskey jug 50 0.64 0.74\n",
- " whistle 50 0.72 0.84\n",
- " wig 50 0.84 0.9\n",
- " window screen 50 0.68 0.8\n",
- " window shade 50 0.52 0.76\n",
- " Windsor tie 50 0.22 0.66\n",
- " wine bottle 50 0.42 0.82\n",
- " wing 50 0.54 0.96\n",
- " wok 50 0.46 0.82\n",
- " wooden spoon 50 0.58 0.8\n",
- " wool 50 0.32 0.82\n",
- " split-rail fence 50 0.74 0.9\n",
- " shipwreck 50 0.84 0.96\n",
- " yawl 50 0.78 0.96\n",
- " yurt 50 0.84 1\n",
- " website 50 0.98 1\n",
- " comic book 50 0.62 0.9\n",
- " crossword 50 0.84 0.88\n",
- " traffic sign 50 0.78 0.9\n",
- " traffic light 50 0.8 0.94\n",
- " dust jacket 50 0.72 0.94\n",
- " menu 50 0.82 0.96\n",
- " plate 50 0.44 0.88\n",
- " guacamole 50 0.8 0.92\n",
- " consomme 50 0.54 0.88\n",
- " hot pot 50 0.86 0.98\n",
- " trifle 50 0.92 0.98\n",
- " ice cream 50 0.68 0.94\n",
- " ice pop 50 0.62 0.84\n",
- " baguette 50 0.62 0.88\n",
- " bagel 50 0.64 0.92\n",
- " pretzel 50 0.72 0.88\n",
- " cheeseburger 50 0.9 1\n",
- " hot dog 50 0.74 0.94\n",
- " mashed potato 50 0.74 0.9\n",
- " cabbage 50 0.84 0.96\n",
- " broccoli 50 0.9 0.96\n",
- " cauliflower 50 0.82 1\n",
- " zucchini 50 0.74 0.9\n",
- " spaghetti squash 50 0.8 0.96\n",
- " acorn squash 50 0.82 0.96\n",
- " butternut squash 50 0.7 0.94\n",
- " cucumber 50 0.6 0.96\n",
- " artichoke 50 0.84 0.94\n",
- " bell pepper 50 0.84 0.98\n",
- " cardoon 50 0.88 0.94\n",
- " mushroom 50 0.38 0.92\n",
- " Granny Smith 50 0.9 0.96\n",
- " strawberry 50 0.6 0.88\n",
- " orange 50 0.7 0.92\n",
- " lemon 50 0.78 0.98\n",
- " fig 50 0.82 0.96\n",
- " pineapple 50 0.86 0.96\n",
- " banana 50 0.84 0.96\n",
- " jackfruit 50 0.9 0.98\n",
- " custard apple 50 0.86 0.96\n",
- " pomegranate 50 0.82 0.98\n",
- " hay 50 0.8 0.92\n",
- " carbonara 50 0.88 0.94\n",
- " chocolate syrup 50 0.46 0.84\n",
- " dough 50 0.4 0.6\n",
- " meatloaf 50 0.58 0.84\n",
- " pizza 50 0.84 0.96\n",
- " pot pie 50 0.68 0.9\n",
- " burrito 50 0.8 0.98\n",
- " red wine 50 0.54 0.82\n",
- " espresso 50 0.64 0.88\n",
- " cup 50 0.38 0.7\n",
- " eggnog 50 0.38 0.7\n",
- " alp 50 0.54 0.88\n",
- " bubble 50 0.8 0.96\n",
- " cliff 50 0.64 1\n",
- " coral reef 50 0.72 0.96\n",
- " geyser 50 0.94 1\n",
- " lakeshore 50 0.54 0.88\n",
- " promontory 50 0.58 0.94\n",
- " shoal 50 0.6 0.96\n",
- " seashore 50 0.44 0.78\n",
- " valley 50 0.72 0.94\n",
- " volcano 50 0.78 0.96\n",
- " baseball player 50 0.72 0.94\n",
- " bridegroom 50 0.72 0.88\n",
- " scuba diver 50 0.8 1\n",
- " rapeseed 50 0.94 0.98\n",
- " daisy 50 0.96 0.98\n",
- " yellow lady's slipper 50 1 1\n",
- " corn 50 0.4 0.88\n",
- " acorn 50 0.92 0.98\n",
- " rose hip 50 0.92 0.98\n",
- " horse chestnut seed 50 0.94 0.98\n",
- " coral fungus 50 0.96 0.96\n",
- " agaric 50 0.82 0.94\n",
- " gyromitra 50 0.98 1\n",
- " stinkhorn mushroom 50 0.8 0.94\n",
- " earth star 50 0.98 1\n",
- " hen-of-the-woods 50 0.8 0.96\n",
- " bolete 50 0.74 0.94\n",
- " ear 50 0.48 0.94\n",
- " toilet paper 50 0.36 0.68\n",
- "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n",
- "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n"
- ]
- }
- ],
- "source": [
- "# Validate YOLOv5s on Imagenet val\n",
- "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ZY2VXXXu74w5"
- },
- "source": [
- "# 3. Train\n",
- "\n",
- "
\n",
- "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
- "
\n",
- "\n",
- "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n",
- "\n",
- "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
- "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
- "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n",
- "
\n",
- "\n",
- "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
- "\n",
- "## Train on Custom Data with Roboflow 🌟 NEW\n",
- "\n",
- "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
- "\n",
- "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n",
- "- Custom Training Notebook: [](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n",
- "
\n",
- "\n",
- "
Label images lightning fast (including with model-assisted labeling)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "i3oKtE4g-aNn"
- },
- "outputs": [],
- "source": [
- "# @title Select YOLOv5 🚀 logger {run: 'auto'}\n",
- "logger = \"Comet\" # @param ['Comet', 'ClearML', 'TensorBoard']\n",
- "\n",
- "if logger == \"Comet\":\n",
- " %pip install -q comet_ml\n",
- " import comet_ml\n",
- "\n",
- " comet_ml.init()\n",
- "elif logger == \"ClearML\":\n",
- " %pip install -q clearml\n",
- " import clearml\n",
- "\n",
- " clearml.browser_login()\n",
- "elif logger == \"TensorBoard\":\n",
- " %load_ext tensorboard\n",
- " %tensorboard --logdir runs/train"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "1NcFxRcFdJ_O",
- "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n",
- "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
- "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n",
- "\n",
- "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n",
- "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n",
- "100% 103M/103M [00:00<00:00, 347MB/s] \n",
- "Unzipping /content/datasets/imagenette160.zip...\n",
- "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n",
- "\n",
- "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n",
- "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n",
- "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n",
- "Image sizes 224 train, 224 test\n",
- "Using 1 dataloader workers\n",
- "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n",
- "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n",
- "\n",
- " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n",
- " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n",
- " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n",
- " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n",
- " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n",
- " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n",
- "\n",
- "Training complete (0.052 hours)\n",
- "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n",
- "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n",
- "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n",
- "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n",
- "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n",
- "Visualize: https://netron.app\n",
- "\n"
- ]
- }
- ],
- "source": [
- "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n",
- "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "15glLzbQx5u0"
- },
- "source": [
- "# 4. Visualize"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nWOsI5wJR1o3"
- },
- "source": [
- "## Comet Logging and Visualization 🌟 NEW\n",
- "\n",
- "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
- "\n",
- "Getting started is easy:\n",
- "```shell\n",
- "pip install comet_ml # 1. install\n",
- "export COMET_API_KEY= # 2. paste API key\n",
- "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
- "```\n",
- "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
- "[](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
- "\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Lay2WsTjNJzP"
- },
- "source": [
- "## ClearML Logging and Automation 🌟 NEW\n",
- "\n",
- "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
- "\n",
- "- `pip install clearml`\n",
- "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
- "\n",
- "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
- "\n",
- "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
- "\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-WPvRbS5Swl6"
- },
- "source": [
- "## Local Logging\n",
- "\n",
- "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
- "\n",
- "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
- "\n",
- "
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Zelyeqbyt3GD"
- },
- "source": [
- "# Environments\n",
- "\n",
- "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
- "\n",
- "- **Notebooks** with free GPU:
\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
- "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
- "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/)
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6Qu7Iesl0p54"
- },
- "source": [
- "# Status\n",
- "\n",
- "\n",
- "\n",
- "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "IEijrePND_2I"
- },
- "source": [
- "# Appendix\n",
- "\n",
- "Additional content below."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "GMusP4OAxFu6"
- },
- "outputs": [],
- "source": [
- "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
- "\n",
- "model = torch.hub.load(\n",
- " \"ultralytics/yolov5\", \"yolov5s\", force_reload=True, trust_repo=True\n",
- ") # or yolov5n - yolov5x6 or custom\n",
- "im = \"https://ultralytics.com/images/zidane.jpg\" # file, Path, PIL.Image, OpenCV, nparray, list\n",
- "results = model(im) # inference\n",
- "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "name": "YOLOv5 Classification Tutorial",
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/yolov5/classify/val.py b/yolov5/classify/val.py
deleted file mode 100644
index 72bd0e1..0000000
--- a/yolov5/classify/val.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Validate a trained YOLOv5 classification model on a classification dataset.
-
-Usage:
- $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)
- $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet
-
-Usage - formats:
- $ python classify/val.py --weights yolov5s-cls.pt # PyTorch
- yolov5s-cls.torchscript # TorchScript
- yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s-cls_openvino_model # OpenVINO
- yolov5s-cls.engine # TensorRT
- yolov5s-cls.mlmodel # CoreML (macOS-only)
- yolov5s-cls_saved_model # TensorFlow SavedModel
- yolov5s-cls.pb # TensorFlow GraphDef
- yolov5s-cls.tflite # TensorFlow Lite
- yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s-cls_paddle_model # PaddlePaddle
-"""
-
-import argparse
-import os
-import sys
-from pathlib import Path
-
-import torch
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from models.common import DetectMultiBackend
-from utils.dataloaders import create_classification_dataloader
-from utils.general import (
- LOGGER,
- TQDM_BAR_FORMAT,
- Profile,
- check_img_size,
- check_requirements,
- colorstr,
- increment_path,
- print_args,
-)
-from utils.torch_utils import select_device, smart_inference_mode
-
-
-@smart_inference_mode()
-def run(
- data=ROOT / "../datasets/mnist", # dataset dir
- weights=ROOT / "yolov5s-cls.pt", # model.pt path(s)
- batch_size=128, # batch size
- imgsz=224, # inference size (pixels)
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
- workers=8, # max dataloader workers (per RANK in DDP mode)
- verbose=False, # verbose output
- project=ROOT / "runs/val-cls", # save to project/name
- name="exp", # save to project/name
- exist_ok=False, # existing project/name ok, do not increment
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- model=None,
- dataloader=None,
- criterion=None,
- pbar=None,
-):
- """Validates a YOLOv5 classification model on a dataset, computing metrics like top1 and top5 accuracy."""
- # Initialize/load model and set device
- training = model is not None
- if training: # called by train.py
- device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
- half &= device.type != "cpu" # half precision only supported on CUDA
- model.half() if half else model.float()
- else: # called directly
- device = select_device(device, batch_size=batch_size)
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- save_dir.mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
- model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
- stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
- imgsz = check_img_size(imgsz, s=stride) # check image size
- half = model.fp16 # FP16 supported on limited backends with CUDA
- if engine:
- batch_size = model.batch_size
- else:
- device = model.device
- if not (pt or jit):
- batch_size = 1 # export.py models default to batch-size 1
- LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
-
- # Dataloader
- data = Path(data)
- test_dir = data / "test" if (data / "test").exists() else data / "val" # data/test or data/val
- dataloader = create_classification_dataloader(
- path=test_dir, imgsz=imgsz, batch_size=batch_size, augment=False, rank=-1, workers=workers
- )
-
- model.eval()
- pred, targets, loss, dt = [], [], 0, (Profile(device=device), Profile(device=device), Profile(device=device))
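-    # dt: three Profile timers covering pre-process, inference, and post-process,
-    # reported as per-image speeds after validation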
- n = len(dataloader) # number of batches
- action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
- desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
- bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
- with torch.cuda.amp.autocast(enabled=device.type != "cpu"):
- for images, labels in bar:
- with dt[0]:
- images, labels = images.to(device, non_blocking=True), labels.to(device)
-
- with dt[1]:
- y = model(images)
-
- with dt[2]:
- pred.append(y.argsort(1, descending=True)[:, :5])
- targets.append(labels)
- if criterion:
- loss += criterion(y, labels)
-
- loss /= n
- pred, targets = torch.cat(pred), torch.cat(targets)
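-    # correct: (N, 5) matrix with entry [i, j] = 1.0 if image i's true class is the j-th
-    # ranked prediction; column 0 alone gives top-1, the max over all 5 columns gives top-5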
- correct = (targets[:, None] == pred).float()
- acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy
- top1, top5 = acc.mean(0).tolist()
-
- if pbar:
- pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
- if verbose: # all classes
- LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
- LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
- for i, c in model.names.items():
- acc_i = acc[targets == i]
- top1i, top5i = acc_i.mean(0).tolist()
- LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")
-
- # Print results
- t = tuple(x.t / len(dataloader.dataset.samples) * 1e3 for x in dt) # speeds per image
- shape = (1, 3, imgsz, imgsz)
- LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}" % t)
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
- return top1, top5, loss
-
-
-def parse_opt():
- """Parses and returns command line arguments for YOLOv5 model evaluation and inference settings."""
- parser = argparse.ArgumentParser()
- parser.add_argument("--data", type=str, default=ROOT / "../datasets/mnist", help="dataset path")
- parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model.pt path(s)")
- parser.add_argument("--batch-size", type=int, default=128, help="batch size")
- parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="inference size (pixels)")
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
- parser.add_argument("--verbose", nargs="?", const=True, default=True, help="verbose output")
- parser.add_argument("--project", default=ROOT / "runs/val-cls", help="save to project/name")
- parser.add_argument("--name", default="exp", help="save to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
- parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
- opt = parser.parse_args()
- print_args(vars(opt))
- return opt
-
-
-def main(opt):
- """Executes the YOLOv5 model prediction workflow, handling argument parsing and requirement checks."""
- check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
- run(**vars(opt))
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
diff --git a/yolov5/data/Argoverse.yaml b/yolov5/data/Argoverse.yaml
deleted file mode 100644
index 651b643..0000000
--- a/yolov5/data/Argoverse.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
-# Example usage: python train.py --data Argoverse.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── Argoverse ← downloads here (31.3 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/Argoverse # dataset root dir
-train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
-val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
-test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
-
-# Classes
-names:
- 0: person
- 1: bicycle
- 2: car
- 3: motorcycle
- 4: bus
- 5: truck
- 6: traffic_light
- 7: stop_sign
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- import json
-
- from tqdm import tqdm
- from utils.general import download, Path
-
-
- def argoverse2yolo(set):
- labels = {}
- a = json.load(open(set, "rb"))
- for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
- img_id = annot['image_id']
- img_name = a['images'][img_id]['name']
- img_label_name = f'{img_name[:-3]}txt'
-
- cls = annot['category_id'] # instance class id
- x_center, y_center, width, height = annot['bbox']
- x_center = (x_center + width / 2) / 1920.0 # offset and scale
- y_center = (y_center + height / 2) / 1200.0 # offset and scale
- width /= 1920.0 # scale
- height /= 1200.0 # scale
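-        # YOLO label rows are "class x_center y_center width height", normalized to [0, 1]
-        # by the 1920x1200 frame size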
-
- img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
- if not img_dir.exists():
- img_dir.mkdir(parents=True, exist_ok=True)
-
- k = str(img_dir / img_label_name)
- if k not in labels:
- labels[k] = []
- labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
-
- for k in labels:
- with open(k, "w") as f:
- f.writelines(labels[k])
-
-
- # Download
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
- download(urls, dir=dir, delete=False)
-
- # Convert
- annotations_dir = 'Argoverse-HD/annotations/'
- (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
- for d in "train.json", "val.json":
-    argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
diff --git a/yolov5/data/GlobalWheat2020.yaml b/yolov5/data/GlobalWheat2020.yaml
deleted file mode 100644
index eb25871..0000000
--- a/yolov5/data/GlobalWheat2020.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
-# Example usage: python train.py --data GlobalWheat2020.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── GlobalWheat2020 ← downloads here (7.0 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/GlobalWheat2020 # dataset root dir
-train: # train images (relative to 'path') 3422 images
- - images/arvalis_1
- - images/arvalis_2
- - images/arvalis_3
- - images/ethz_1
- - images/rres_1
- - images/inrae_1
- - images/usask_1
-val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
- - images/ethz_1
-test: # test images (optional) 1276 images
- - images/utokyo_1
- - images/utokyo_2
- - images/nau_1
- - images/uq_1
-
-# Classes
-names:
- 0: wheat_head
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- from utils.general import download, Path
-
-
- # Download
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/GlobalWheat2020_labels.zip']
- download(urls, dir=dir)
-
- # Make Directories
- for p in 'annotations', 'images', 'labels':
- (dir / p).mkdir(parents=True, exist_ok=True)
-
- # Move
- for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
- 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
- (dir / p).rename(dir / 'images' / p) # move to /images
- f = (dir / p).with_suffix('.json') # json file
- if f.exists():
- f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
diff --git a/yolov5/data/ImageNet.yaml b/yolov5/data/ImageNet.yaml
deleted file mode 100644
index a3cf694..0000000
--- a/yolov5/data/ImageNet.yaml
+++ /dev/null
@@ -1,1021 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
-# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
-# Example usage: python classify/train.py --data imagenet
-# parent
-# ├── yolov5
-# └── datasets
-# └── imagenet ← downloads here (144 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/imagenet # dataset root dir
-train: train # train images (relative to 'path') 1281167 images
-val: val # val images (relative to 'path') 50000 images
-test: # test images (optional)
-
-# Classes
-names:
- 0: tench
- 1: goldfish
- 2: great white shark
- 3: tiger shark
- 4: hammerhead shark
- 5: electric ray
- 6: stingray
- 7: cock
- 8: hen
- 9: ostrich
- 10: brambling
- 11: goldfinch
- 12: house finch
- 13: junco
- 14: indigo bunting
- 15: American robin
- 16: bulbul
- 17: jay
- 18: magpie
- 19: chickadee
- 20: American dipper
- 21: kite
- 22: bald eagle
- 23: vulture
- 24: great grey owl
- 25: fire salamander
- 26: smooth newt
- 27: newt
- 28: spotted salamander
- 29: axolotl
- 30: American bullfrog
- 31: tree frog
- 32: tailed frog
- 33: loggerhead sea turtle
- 34: leatherback sea turtle
- 35: mud turtle
- 36: terrapin
- 37: box turtle
- 38: banded gecko
- 39: green iguana
- 40: Carolina anole
- 41: desert grassland whiptail lizard
- 42: agama
- 43: frilled-necked lizard
- 44: alligator lizard
- 45: Gila monster
- 46: European green lizard
- 47: chameleon
- 48: Komodo dragon
- 49: Nile crocodile
- 50: American alligator
- 51: triceratops
- 52: worm snake
- 53: ring-necked snake
- 54: eastern hog-nosed snake
- 55: smooth green snake
- 56: kingsnake
- 57: garter snake
- 58: water snake
- 59: vine snake
- 60: night snake
- 61: boa constrictor
- 62: African rock python
- 63: Indian cobra
- 64: green mamba
- 65: sea snake
- 66: Saharan horned viper
- 67: eastern diamondback rattlesnake
- 68: sidewinder
- 69: trilobite
- 70: harvestman
- 71: scorpion
- 72: yellow garden spider
- 73: barn spider
- 74: European garden spider
- 75: southern black widow
- 76: tarantula
- 77: wolf spider
- 78: tick
- 79: centipede
- 80: black grouse
- 81: ptarmigan
- 82: ruffed grouse
- 83: prairie grouse
- 84: peacock
- 85: quail
- 86: partridge
- 87: grey parrot
- 88: macaw
- 89: sulphur-crested cockatoo
- 90: lorikeet
- 91: coucal
- 92: bee eater
- 93: hornbill
- 94: hummingbird
- 95: jacamar
- 96: toucan
- 97: duck
- 98: red-breasted merganser
- 99: goose
- 100: black swan
- 101: tusker
- 102: echidna
- 103: platypus
- 104: wallaby
- 105: koala
- 106: wombat
- 107: jellyfish
- 108: sea anemone
- 109: brain coral
- 110: flatworm
- 111: nematode
- 112: conch
- 113: snail
- 114: slug
- 115: sea slug
- 116: chiton
- 117: chambered nautilus
- 118: Dungeness crab
- 119: rock crab
- 120: fiddler crab
- 121: red king crab
- 122: American lobster
- 123: spiny lobster
- 124: crayfish
- 125: hermit crab
- 126: isopod
- 127: white stork
- 128: black stork
- 129: spoonbill
- 130: flamingo
- 131: little blue heron
- 132: great egret
- 133: bittern
- 134: crane (bird)
- 135: limpkin
- 136: common gallinule
- 137: American coot
- 138: bustard
- 139: ruddy turnstone
- 140: dunlin
- 141: common redshank
- 142: dowitcher
- 143: oystercatcher
- 144: pelican
- 145: king penguin
- 146: albatross
- 147: grey whale
- 148: killer whale
- 149: dugong
- 150: sea lion
- 151: Chihuahua
- 152: Japanese Chin
- 153: Maltese
- 154: Pekingese
- 155: Shih Tzu
- 156: King Charles Spaniel
- 157: Papillon
- 158: toy terrier
- 159: Rhodesian Ridgeback
- 160: Afghan Hound
- 161: Basset Hound
- 162: Beagle
- 163: Bloodhound
- 164: Bluetick Coonhound
- 165: Black and Tan Coonhound
- 166: Treeing Walker Coonhound
- 167: English foxhound
- 168: Redbone Coonhound
- 169: borzoi
- 170: Irish Wolfhound
- 171: Italian Greyhound
- 172: Whippet
- 173: Ibizan Hound
- 174: Norwegian Elkhound
- 175: Otterhound
- 176: Saluki
- 177: Scottish Deerhound
- 178: Weimaraner
- 179: Staffordshire Bull Terrier
- 180: American Staffordshire Terrier
- 181: Bedlington Terrier
- 182: Border Terrier
- 183: Kerry Blue Terrier
- 184: Irish Terrier
- 185: Norfolk Terrier
- 186: Norwich Terrier
- 187: Yorkshire Terrier
- 188: Wire Fox Terrier
- 189: Lakeland Terrier
- 190: Sealyham Terrier
- 191: Airedale Terrier
- 192: Cairn Terrier
- 193: Australian Terrier
- 194: Dandie Dinmont Terrier
- 195: Boston Terrier
- 196: Miniature Schnauzer
- 197: Giant Schnauzer
- 198: Standard Schnauzer
- 199: Scottish Terrier
- 200: Tibetan Terrier
- 201: Australian Silky Terrier
- 202: Soft-coated Wheaten Terrier
- 203: West Highland White Terrier
- 204: Lhasa Apso
- 205: Flat-Coated Retriever
- 206: Curly-coated Retriever
- 207: Golden Retriever
- 208: Labrador Retriever
- 209: Chesapeake Bay Retriever
- 210: German Shorthaired Pointer
- 211: Vizsla
- 212: English Setter
- 213: Irish Setter
- 214: Gordon Setter
- 215: Brittany
- 216: Clumber Spaniel
- 217: English Springer Spaniel
- 218: Welsh Springer Spaniel
- 219: Cocker Spaniels
- 220: Sussex Spaniel
- 221: Irish Water Spaniel
- 222: Kuvasz
- 223: Schipperke
- 224: Groenendael
- 225: Malinois
- 226: Briard
- 227: Australian Kelpie
- 228: Komondor
- 229: Old English Sheepdog
- 230: Shetland Sheepdog
- 231: collie
- 232: Border Collie
- 233: Bouvier des Flandres
- 234: Rottweiler
- 235: German Shepherd Dog
- 236: Dobermann
- 237: Miniature Pinscher
- 238: Greater Swiss Mountain Dog
- 239: Bernese Mountain Dog
- 240: Appenzeller Sennenhund
- 241: Entlebucher Sennenhund
- 242: Boxer
- 243: Bullmastiff
- 244: Tibetan Mastiff
- 245: French Bulldog
- 246: Great Dane
- 247: St. Bernard
- 248: husky
- 249: Alaskan Malamute
- 250: Siberian Husky
- 251: Dalmatian
- 252: Affenpinscher
- 253: Basenji
- 254: pug
- 255: Leonberger
- 256: Newfoundland
- 257: Pyrenean Mountain Dog
- 258: Samoyed
- 259: Pomeranian
- 260: Chow Chow
- 261: Keeshond
- 262: Griffon Bruxellois
- 263: Pembroke Welsh Corgi
- 264: Cardigan Welsh Corgi
- 265: Toy Poodle
- 266: Miniature Poodle
- 267: Standard Poodle
- 268: Mexican hairless dog
- 269: grey wolf
- 270: Alaskan tundra wolf
- 271: red wolf
- 272: coyote
- 273: dingo
- 274: dhole
- 275: African wild dog
- 276: hyena
- 277: red fox
- 278: kit fox
- 279: Arctic fox
- 280: grey fox
- 281: tabby cat
- 282: tiger cat
- 283: Persian cat
- 284: Siamese cat
- 285: Egyptian Mau
- 286: cougar
- 287: lynx
- 288: leopard
- 289: snow leopard
- 290: jaguar
- 291: lion
- 292: tiger
- 293: cheetah
- 294: brown bear
- 295: American black bear
- 296: polar bear
- 297: sloth bear
- 298: mongoose
- 299: meerkat
- 300: tiger beetle
- 301: ladybug
- 302: ground beetle
- 303: longhorn beetle
- 304: leaf beetle
- 305: dung beetle
- 306: rhinoceros beetle
- 307: weevil
- 308: fly
- 309: bee
- 310: ant
- 311: grasshopper
- 312: cricket
- 313: stick insect
- 314: cockroach
- 315: mantis
- 316: cicada
- 317: leafhopper
- 318: lacewing
- 319: dragonfly
- 320: damselfly
- 321: red admiral
- 322: ringlet
- 323: monarch butterfly
- 324: small white
- 325: sulphur butterfly
- 326: gossamer-winged butterfly
- 327: starfish
- 328: sea urchin
- 329: sea cucumber
- 330: cottontail rabbit
- 331: hare
- 332: Angora rabbit
- 333: hamster
- 334: porcupine
- 335: fox squirrel
- 336: marmot
- 337: beaver
- 338: guinea pig
- 339: common sorrel
- 340: zebra
- 341: pig
- 342: wild boar
- 343: warthog
- 344: hippopotamus
- 345: ox
- 346: water buffalo
- 347: bison
- 348: ram
- 349: bighorn sheep
- 350: Alpine ibex
- 351: hartebeest
- 352: impala
- 353: gazelle
- 354: dromedary
- 355: llama
- 356: weasel
- 357: mink
- 358: European polecat
- 359: black-footed ferret
- 360: otter
- 361: skunk
- 362: badger
- 363: armadillo
- 364: three-toed sloth
- 365: orangutan
- 366: gorilla
- 367: chimpanzee
- 368: gibbon
- 369: siamang
- 370: guenon
- 371: patas monkey
- 372: baboon
- 373: macaque
- 374: langur
- 375: black-and-white colobus
- 376: proboscis monkey
- 377: marmoset
- 378: white-headed capuchin
- 379: howler monkey
- 380: titi
- 381: Geoffroy's spider monkey
- 382: common squirrel monkey
- 383: ring-tailed lemur
- 384: indri
- 385: Asian elephant
- 386: African bush elephant
- 387: red panda
- 388: giant panda
- 389: snoek
- 390: eel
- 391: coho salmon
- 392: rock beauty
- 393: clownfish
- 394: sturgeon
- 395: garfish
- 396: lionfish
- 397: pufferfish
- 398: abacus
- 399: abaya
- 400: academic gown
- 401: accordion
- 402: acoustic guitar
- 403: aircraft carrier
- 404: airliner
- 405: airship
- 406: altar
- 407: ambulance
- 408: amphibious vehicle
- 409: analog clock
- 410: apiary
- 411: apron
- 412: waste container
- 413: assault rifle
- 414: backpack
- 415: bakery
- 416: balance beam
- 417: balloon
- 418: ballpoint pen
- 419: Band-Aid
- 420: banjo
- 421: baluster
- 422: barbell
- 423: barber chair
- 424: barbershop
- 425: barn
- 426: barometer
- 427: barrel
- 428: wheelbarrow
- 429: baseball
- 430: basketball
- 431: bassinet
- 432: bassoon
- 433: swimming cap
- 434: bath towel
- 435: bathtub
- 436: station wagon
- 437: lighthouse
- 438: beaker
- 439: military cap
- 440: beer bottle
- 441: beer glass
- 442: bell-cot
- 443: bib
- 444: tandem bicycle
- 445: bikini
- 446: ring binder
- 447: binoculars
- 448: birdhouse
- 449: boathouse
- 450: bobsleigh
- 451: bolo tie
- 452: poke bonnet
- 453: bookcase
- 454: bookstore
- 455: bottle cap
- 456: bow
- 457: bow tie
- 458: brass
- 459: bra
- 460: breakwater
- 461: breastplate
- 462: broom
- 463: bucket
- 464: buckle
- 465: bulletproof vest
- 466: high-speed train
- 467: butcher shop
- 468: taxicab
- 469: cauldron
- 470: candle
- 471: cannon
- 472: canoe
- 473: can opener
- 474: cardigan
- 475: car mirror
- 476: carousel
- 477: tool kit
- 478: carton
- 479: car wheel
- 480: automated teller machine
- 481: cassette
- 482: cassette player
- 483: castle
- 484: catamaran
- 485: CD player
- 486: cello
- 487: mobile phone
- 488: chain
- 489: chain-link fence
- 490: chain mail
- 491: chainsaw
- 492: chest
- 493: chiffonier
- 494: chime
- 495: china cabinet
- 496: Christmas stocking
- 497: church
- 498: movie theater
- 499: cleaver
- 500: cliff dwelling
- 501: cloak
- 502: clogs
- 503: cocktail shaker
- 504: coffee mug
- 505: coffeemaker
- 506: coil
- 507: combination lock
- 508: computer keyboard
- 509: confectionery store
- 510: container ship
- 511: convertible
- 512: corkscrew
- 513: cornet
- 514: cowboy boot
- 515: cowboy hat
- 516: cradle
- 517: crane (machine)
- 518: crash helmet
- 519: crate
- 520: infant bed
- 521: Crock Pot
- 522: croquet ball
- 523: crutch
- 524: cuirass
- 525: dam
- 526: desk
- 527: desktop computer
- 528: rotary dial telephone
- 529: diaper
- 530: digital clock
- 531: digital watch
- 532: dining table
- 533: dishcloth
- 534: dishwasher
- 535: disc brake
- 536: dock
- 537: dog sled
- 538: dome
- 539: doormat
- 540: drilling rig
- 541: drum
- 542: drumstick
- 543: dumbbell
- 544: Dutch oven
- 545: electric fan
- 546: electric guitar
- 547: electric locomotive
- 548: entertainment center
- 549: envelope
- 550: espresso machine
- 551: face powder
- 552: feather boa
- 553: filing cabinet
- 554: fireboat
- 555: fire engine
- 556: fire screen sheet
- 557: flagpole
- 558: flute
- 559: folding chair
- 560: football helmet
- 561: forklift
- 562: fountain
- 563: fountain pen
- 564: four-poster bed
- 565: freight car
- 566: French horn
- 567: frying pan
- 568: fur coat
- 569: garbage truck
- 570: gas mask
- 571: gas pump
- 572: goblet
- 573: go-kart
- 574: golf ball
- 575: golf cart
- 576: gondola
- 577: gong
- 578: gown
- 579: grand piano
- 580: greenhouse
- 581: grille
- 582: grocery store
- 583: guillotine
- 584: barrette
- 585: hair spray
- 586: half-track
- 587: hammer
- 588: hamper
- 589: hair dryer
- 590: hand-held computer
- 591: handkerchief
- 592: hard disk drive
- 593: harmonica
- 594: harp
- 595: harvester
- 596: hatchet
- 597: holster
- 598: home theater
- 599: honeycomb
- 600: hook
- 601: hoop skirt
- 602: horizontal bar
- 603: horse-drawn vehicle
- 604: hourglass
- 605: iPod
- 606: clothes iron
- 607: jack-o'-lantern
- 608: jeans
- 609: jeep
- 610: T-shirt
- 611: jigsaw puzzle
- 612: pulled rickshaw
- 613: joystick
- 614: kimono
- 615: knee pad
- 616: knot
- 617: lab coat
- 618: ladle
- 619: lampshade
- 620: laptop computer
- 621: lawn mower
- 622: lens cap
- 623: paper knife
- 624: library
- 625: lifeboat
- 626: lighter
- 627: limousine
- 628: ocean liner
- 629: lipstick
- 630: slip-on shoe
- 631: lotion
- 632: speaker
- 633: loupe
- 634: sawmill
- 635: magnetic compass
- 636: mail bag
- 637: mailbox
- 638: tights
- 639: tank suit
- 640: manhole cover
- 641: maraca
- 642: marimba
- 643: mask
- 644: match
- 645: maypole
- 646: maze
- 647: measuring cup
- 648: medicine chest
- 649: megalith
- 650: microphone
- 651: microwave oven
- 652: military uniform
- 653: milk can
- 654: minibus
- 655: miniskirt
- 656: minivan
- 657: missile
- 658: mitten
- 659: mixing bowl
- 660: mobile home
- 661: Model T
- 662: modem
- 663: monastery
- 664: monitor
- 665: moped
- 666: mortar
- 667: square academic cap
- 668: mosque
- 669: mosquito net
- 670: scooter
- 671: mountain bike
- 672: tent
- 673: computer mouse
- 674: mousetrap
- 675: moving van
- 676: muzzle
- 677: nail
- 678: neck brace
- 679: necklace
- 680: nipple
- 681: notebook computer
- 682: obelisk
- 683: oboe
- 684: ocarina
- 685: odometer
- 686: oil filter
- 687: organ
- 688: oscilloscope
- 689: overskirt
- 690: bullock cart
- 691: oxygen mask
- 692: packet
- 693: paddle
- 694: paddle wheel
- 695: padlock
- 696: paintbrush
- 697: pajamas
- 698: palace
- 699: pan flute
- 700: paper towel
- 701: parachute
- 702: parallel bars
- 703: park bench
- 704: parking meter
- 705: passenger car
- 706: patio
- 707: payphone
- 708: pedestal
- 709: pencil case
- 710: pencil sharpener
- 711: perfume
- 712: Petri dish
- 713: photocopier
- 714: plectrum
- 715: Pickelhaube
- 716: picket fence
- 717: pickup truck
- 718: pier
- 719: piggy bank
- 720: pill bottle
- 721: pillow
- 722: ping-pong ball
- 723: pinwheel
- 724: pirate ship
- 725: pitcher
- 726: hand plane
- 727: planetarium
- 728: plastic bag
- 729: plate rack
- 730: plow
- 731: plunger
- 732: Polaroid camera
- 733: pole
- 734: police van
- 735: poncho
- 736: billiard table
- 737: soda bottle
- 738: pot
- 739: potter's wheel
- 740: power drill
- 741: prayer rug
- 742: printer
- 743: prison
- 744: projectile
- 745: projector
- 746: hockey puck
- 747: punching bag
- 748: purse
- 749: quill
- 750: quilt
- 751: race car
- 752: racket
- 753: radiator
- 754: radio
- 755: radio telescope
- 756: rain barrel
- 757: recreational vehicle
- 758: reel
- 759: reflex camera
- 760: refrigerator
- 761: remote control
- 762: restaurant
- 763: revolver
- 764: rifle
- 765: rocking chair
- 766: rotisserie
- 767: eraser
- 768: rugby ball
- 769: ruler
- 770: running shoe
- 771: safe
- 772: safety pin
- 773: salt shaker
- 774: sandal
- 775: sarong
- 776: saxophone
- 777: scabbard
- 778: weighing scale
- 779: school bus
- 780: schooner
- 781: scoreboard
- 782: CRT screen
- 783: screw
- 784: screwdriver
- 785: seat belt
- 786: sewing machine
- 787: shield
- 788: shoe store
- 789: shoji
- 790: shopping basket
- 791: shopping cart
- 792: shovel
- 793: shower cap
- 794: shower curtain
- 795: ski
- 796: ski mask
- 797: sleeping bag
- 798: slide rule
- 799: sliding door
- 800: slot machine
- 801: snorkel
- 802: snowmobile
- 803: snowplow
- 804: soap dispenser
- 805: soccer ball
- 806: sock
- 807: solar thermal collector
- 808: sombrero
- 809: soup bowl
- 810: space bar
- 811: space heater
- 812: space shuttle
- 813: spatula
- 814: motorboat
- 815: spider web
- 816: spindle
- 817: sports car
- 818: spotlight
- 819: stage
- 820: steam locomotive
- 821: through arch bridge
- 822: steel drum
- 823: stethoscope
- 824: scarf
- 825: stone wall
- 826: stopwatch
- 827: stove
- 828: strainer
- 829: tram
- 830: stretcher
- 831: couch
- 832: stupa
- 833: submarine
- 834: suit
- 835: sundial
- 836: sunglass
- 837: sunglasses
- 838: sunscreen
- 839: suspension bridge
- 840: mop
- 841: sweatshirt
- 842: swimsuit
- 843: swing
- 844: switch
- 845: syringe
- 846: table lamp
- 847: tank
- 848: tape player
- 849: teapot
- 850: teddy bear
- 851: television
- 852: tennis ball
- 853: thatched roof
- 854: front curtain
- 855: thimble
- 856: threshing machine
- 857: throne
- 858: tile roof
- 859: toaster
- 860: tobacco shop
- 861: toilet seat
- 862: torch
- 863: totem pole
- 864: tow truck
- 865: toy store
- 866: tractor
- 867: semi-trailer truck
- 868: tray
- 869: trench coat
- 870: tricycle
- 871: trimaran
- 872: tripod
- 873: triumphal arch
- 874: trolleybus
- 875: trombone
- 876: tub
- 877: turnstile
- 878: typewriter keyboard
- 879: umbrella
- 880: unicycle
- 881: upright piano
- 882: vacuum cleaner
- 883: vase
- 884: vault
- 885: velvet
- 886: vending machine
- 887: vestment
- 888: viaduct
- 889: violin
- 890: volleyball
- 891: waffle iron
- 892: wall clock
- 893: wallet
- 894: wardrobe
- 895: military aircraft
- 896: sink
- 897: washing machine
- 898: water bottle
- 899: water jug
- 900: water tower
- 901: whiskey jug
- 902: whistle
- 903: wig
- 904: window screen
- 905: window shade
- 906: Windsor tie
- 907: wine bottle
- 908: wing
- 909: wok
- 910: wooden spoon
- 911: wool
- 912: split-rail fence
- 913: shipwreck
- 914: yawl
- 915: yurt
- 916: website
- 917: comic book
- 918: crossword
- 919: traffic sign
- 920: traffic light
- 921: dust jacket
- 922: menu
- 923: plate
- 924: guacamole
- 925: consomme
- 926: hot pot
- 927: trifle
- 928: ice cream
- 929: ice pop
- 930: baguette
- 931: bagel
- 932: pretzel
- 933: cheeseburger
- 934: hot dog
- 935: mashed potato
- 936: cabbage
- 937: broccoli
- 938: cauliflower
- 939: zucchini
- 940: spaghetti squash
- 941: acorn squash
- 942: butternut squash
- 943: cucumber
- 944: artichoke
- 945: bell pepper
- 946: cardoon
- 947: mushroom
- 948: Granny Smith
- 949: strawberry
- 950: orange
- 951: lemon
- 952: fig
- 953: pineapple
- 954: banana
- 955: jackfruit
- 956: custard apple
- 957: pomegranate
- 958: hay
- 959: carbonara
- 960: chocolate syrup
- 961: dough
- 962: meatloaf
- 963: pizza
- 964: pot pie
- 965: burrito
- 966: red wine
- 967: espresso
- 968: cup
- 969: eggnog
- 970: alp
- 971: bubble
- 972: cliff
- 973: coral reef
- 974: geyser
- 975: lakeshore
- 976: promontory
- 977: shoal
- 978: seashore
- 979: valley
- 980: volcano
- 981: baseball player
- 982: bridegroom
- 983: scuba diver
- 984: rapeseed
- 985: daisy
- 986: yellow lady's slipper
- 987: corn
- 988: acorn
- 989: rose hip
- 990: horse chestnut seed
- 991: coral fungus
- 992: agaric
- 993: gyromitra
- 994: stinkhorn mushroom
- 995: earth star
- 996: hen-of-the-woods
- 997: bolete
- 998: ear
- 999: toilet paper
-
-# Download script/URL (optional)
-download: data/scripts/get_imagenet.sh
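
All of the dataset YAMLs removed in this patch share one shape: path/train/val/test keys plus an integer-keyed names map. A minimal sketch of consuming that map with PyYAML (the filename is an assumption, standing in for a local copy of the file deleted above):

import yaml  # PyYAML

with open("ImageNet.yaml") as f:  # hypothetical local copy of the deleted file
    cfg = yaml.safe_load(f)

names = cfg["names"]  # {0: 'tench', ..., 999: 'toilet paper'}
name_to_id = {v: k for k, v in names.items()}
assert len(name_to_id) == len(names)  # sanity check: the simple labels should be unique
print(len(names), name_to_id["toilet paper"])  # 1000 999
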
diff --git a/yolov5/data/ImageNet10.yaml b/yolov5/data/ImageNet10.yaml
deleted file mode 100644
index e50e588..0000000
--- a/yolov5/data/ImageNet10.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
-# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
-# Example usage: python classify/train.py --data imagenet10
-# parent
-# ├── yolov5
-# └── datasets
-# └── imagenet10 ← downloads here
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/imagenet10 # dataset root dir
-train: train # train images (relative to 'path')
-val: val # val images (relative to 'path')
-test: # test images (optional)
-
-# Classes
-names:
- 0: tench
- 1: goldfish
- 2: great white shark
- 3: tiger shark
- 4: hammerhead shark
- 5: electric ray
- 6: stingray
- 7: cock
- 8: hen
- 9: ostrich
-
-# Download script/URL (optional)
-download: data/scripts/get_imagenet10.sh
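
ImageNet10 keeps only the first ten classes of the full list. With train: train and val: val, the implied layout is one subdirectory per class under each split; a quick pre-flight check of that layout (root taken from the YAML above):

from pathlib import Path

root = Path("../datasets/imagenet10")  # 'path' from the YAML above
for split in ("train", "val"):
    d = root / split
    classes = sorted(p.name for p in d.iterdir() if p.is_dir()) if d.is_dir() else []
    print(f"{split}: {len(classes)} class folders")  # expect 10 per split
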
diff --git a/yolov5/data/ImageNet100.yaml b/yolov5/data/ImageNet100.yaml
deleted file mode 100644
index e3891bc..0000000
--- a/yolov5/data/ImageNet100.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
-# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
-# Example usage: python classify/train.py --data imagenet100
-# parent
-# ├── yolov5
-# └── datasets
-# └── imagenet100 ← downloads here
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/imagenet100 # dataset root dir
-train: train # train images (relative to 'path')
-val: val # val images (relative to 'path')
-test: # test images (optional)
-
-# Classes
-names:
- 0: tench
- 1: goldfish
- 2: great white shark
- 3: tiger shark
- 4: hammerhead shark
- 5: electric ray
- 6: stingray
- 7: cock
- 8: hen
- 9: ostrich
- 10: brambling
- 11: goldfinch
- 12: house finch
- 13: junco
- 14: indigo bunting
- 15: American robin
- 16: bulbul
- 17: jay
- 18: magpie
- 19: chickadee
- 20: American dipper
- 21: kite
- 22: bald eagle
- 23: vulture
- 24: great grey owl
- 25: fire salamander
- 26: smooth newt
- 27: newt
- 28: spotted salamander
- 29: axolotl
- 30: American bullfrog
- 31: tree frog
- 32: tailed frog
- 33: loggerhead sea turtle
- 34: leatherback sea turtle
- 35: mud turtle
- 36: terrapin
- 37: box turtle
- 38: banded gecko
- 39: green iguana
- 40: Carolina anole
- 41: desert grassland whiptail lizard
- 42: agama
- 43: frilled-necked lizard
- 44: alligator lizard
- 45: Gila monster
- 46: European green lizard
- 47: chameleon
- 48: Komodo dragon
- 49: Nile crocodile
- 50: American alligator
- 51: triceratops
- 52: worm snake
- 53: ring-necked snake
- 54: eastern hog-nosed snake
- 55: smooth green snake
- 56: kingsnake
- 57: garter snake
- 58: water snake
- 59: vine snake
- 60: night snake
- 61: boa constrictor
- 62: African rock python
- 63: Indian cobra
- 64: green mamba
- 65: sea snake
- 66: Saharan horned viper
- 67: eastern diamondback rattlesnake
- 68: sidewinder
- 69: trilobite
- 70: harvestman
- 71: scorpion
- 72: yellow garden spider
- 73: barn spider
- 74: European garden spider
- 75: southern black widow
- 76: tarantula
- 77: wolf spider
- 78: tick
- 79: centipede
- 80: black grouse
- 81: ptarmigan
- 82: ruffed grouse
- 83: prairie grouse
- 84: peacock
- 85: quail
- 86: partridge
- 87: grey parrot
- 88: macaw
- 89: sulphur-crested cockatoo
- 90: lorikeet
- 91: coucal
- 92: bee eater
- 93: hornbill
- 94: hummingbird
- 95: jacamar
- 96: toucan
- 97: duck
- 98: red-breasted merganser
- 99: goose
-# Download script/URL (optional)
-download: data/scripts/get_imagenet100.sh
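
ImageNet10, ImageNet100, and ImageNet1000 are prefix subsets of the same 1000-name list, so a quick consistency check across local copies of the three files catches copy-paste drift:

import yaml

def load_names(path):
    with open(path) as f:
        return yaml.safe_load(f)["names"]

n10 = load_names("ImageNet10.yaml")  # filenames assume local copies of the deleted files
n100 = load_names("ImageNet100.yaml")
n1000 = load_names("ImageNet1000.yaml")
assert all(n1000[i] == n10[i] for i in n10)
assert all(n1000[i] == n100[i] for i in n100)
print("ImageNet10/100 are consistent prefixes of ImageNet1000")
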
diff --git a/yolov5/data/ImageNet1000.yaml b/yolov5/data/ImageNet1000.yaml
deleted file mode 100644
index 8943d33..0000000
--- a/yolov5/data/ImageNet1000.yaml
+++ /dev/null
@@ -1,1021 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
-# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
-# Example usage: python classify/train.py --data imagenet1000
-# parent
-# ├── yolov5
-# └── datasets
-# └── imagenet1000 ← downloads here
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/imagenet1000 # dataset root dir
-train: train # train images (relative to 'path') 1281167 images
-val: val # val images (relative to 'path') 50000 images
-test: # test images (optional)
-
-# Classes
-names:
- 0: tench
- 1: goldfish
- 2: great white shark
- 3: tiger shark
- 4: hammerhead shark
- 5: electric ray
- 6: stingray
- 7: cock
- 8: hen
- 9: ostrich
- 10: brambling
- 11: goldfinch
- 12: house finch
- 13: junco
- 14: indigo bunting
- 15: American robin
- 16: bulbul
- 17: jay
- 18: magpie
- 19: chickadee
- 20: American dipper
- 21: kite
- 22: bald eagle
- 23: vulture
- 24: great grey owl
- 25: fire salamander
- 26: smooth newt
- 27: newt
- 28: spotted salamander
- 29: axolotl
- 30: American bullfrog
- 31: tree frog
- 32: tailed frog
- 33: loggerhead sea turtle
- 34: leatherback sea turtle
- 35: mud turtle
- 36: terrapin
- 37: box turtle
- 38: banded gecko
- 39: green iguana
- 40: Carolina anole
- 41: desert grassland whiptail lizard
- 42: agama
- 43: frilled-necked lizard
- 44: alligator lizard
- 45: Gila monster
- 46: European green lizard
- 47: chameleon
- 48: Komodo dragon
- 49: Nile crocodile
- 50: American alligator
- 51: triceratops
- 52: worm snake
- 53: ring-necked snake
- 54: eastern hog-nosed snake
- 55: smooth green snake
- 56: kingsnake
- 57: garter snake
- 58: water snake
- 59: vine snake
- 60: night snake
- 61: boa constrictor
- 62: African rock python
- 63: Indian cobra
- 64: green mamba
- 65: sea snake
- 66: Saharan horned viper
- 67: eastern diamondback rattlesnake
- 68: sidewinder
- 69: trilobite
- 70: harvestman
- 71: scorpion
- 72: yellow garden spider
- 73: barn spider
- 74: European garden spider
- 75: southern black widow
- 76: tarantula
- 77: wolf spider
- 78: tick
- 79: centipede
- 80: black grouse
- 81: ptarmigan
- 82: ruffed grouse
- 83: prairie grouse
- 84: peacock
- 85: quail
- 86: partridge
- 87: grey parrot
- 88: macaw
- 89: sulphur-crested cockatoo
- 90: lorikeet
- 91: coucal
- 92: bee eater
- 93: hornbill
- 94: hummingbird
- 95: jacamar
- 96: toucan
- 97: duck
- 98: red-breasted merganser
- 99: goose
- 100: black swan
- 101: tusker
- 102: echidna
- 103: platypus
- 104: wallaby
- 105: koala
- 106: wombat
- 107: jellyfish
- 108: sea anemone
- 109: brain coral
- 110: flatworm
- 111: nematode
- 112: conch
- 113: snail
- 114: slug
- 115: sea slug
- 116: chiton
- 117: chambered nautilus
- 118: Dungeness crab
- 119: rock crab
- 120: fiddler crab
- 121: red king crab
- 122: American lobster
- 123: spiny lobster
- 124: crayfish
- 125: hermit crab
- 126: isopod
- 127: white stork
- 128: black stork
- 129: spoonbill
- 130: flamingo
- 131: little blue heron
- 132: great egret
- 133: bittern
- 134: crane (bird)
- 135: limpkin
- 136: common gallinule
- 137: American coot
- 138: bustard
- 139: ruddy turnstone
- 140: dunlin
- 141: common redshank
- 142: dowitcher
- 143: oystercatcher
- 144: pelican
- 145: king penguin
- 146: albatross
- 147: grey whale
- 148: killer whale
- 149: dugong
- 150: sea lion
- 151: Chihuahua
- 152: Japanese Chin
- 153: Maltese
- 154: Pekingese
- 155: Shih Tzu
- 156: King Charles Spaniel
- 157: Papillon
- 158: toy terrier
- 159: Rhodesian Ridgeback
- 160: Afghan Hound
- 161: Basset Hound
- 162: Beagle
- 163: Bloodhound
- 164: Bluetick Coonhound
- 165: Black and Tan Coonhound
- 166: Treeing Walker Coonhound
- 167: English foxhound
- 168: Redbone Coonhound
- 169: borzoi
- 170: Irish Wolfhound
- 171: Italian Greyhound
- 172: Whippet
- 173: Ibizan Hound
- 174: Norwegian Elkhound
- 175: Otterhound
- 176: Saluki
- 177: Scottish Deerhound
- 178: Weimaraner
- 179: Staffordshire Bull Terrier
- 180: American Staffordshire Terrier
- 181: Bedlington Terrier
- 182: Border Terrier
- 183: Kerry Blue Terrier
- 184: Irish Terrier
- 185: Norfolk Terrier
- 186: Norwich Terrier
- 187: Yorkshire Terrier
- 188: Wire Fox Terrier
- 189: Lakeland Terrier
- 190: Sealyham Terrier
- 191: Airedale Terrier
- 192: Cairn Terrier
- 193: Australian Terrier
- 194: Dandie Dinmont Terrier
- 195: Boston Terrier
- 196: Miniature Schnauzer
- 197: Giant Schnauzer
- 198: Standard Schnauzer
- 199: Scottish Terrier
- 200: Tibetan Terrier
- 201: Australian Silky Terrier
- 202: Soft-coated Wheaten Terrier
- 203: West Highland White Terrier
- 204: Lhasa Apso
- 205: Flat-Coated Retriever
- 206: Curly-coated Retriever
- 207: Golden Retriever
- 208: Labrador Retriever
- 209: Chesapeake Bay Retriever
- 210: German Shorthaired Pointer
- 211: Vizsla
- 212: English Setter
- 213: Irish Setter
- 214: Gordon Setter
- 215: Brittany
- 216: Clumber Spaniel
- 217: English Springer Spaniel
- 218: Welsh Springer Spaniel
- 219: Cocker Spaniels
- 220: Sussex Spaniel
- 221: Irish Water Spaniel
- 222: Kuvasz
- 223: Schipperke
- 224: Groenendael
- 225: Malinois
- 226: Briard
- 227: Australian Kelpie
- 228: Komondor
- 229: Old English Sheepdog
- 230: Shetland Sheepdog
- 231: collie
- 232: Border Collie
- 233: Bouvier des Flandres
- 234: Rottweiler
- 235: German Shepherd Dog
- 236: Dobermann
- 237: Miniature Pinscher
- 238: Greater Swiss Mountain Dog
- 239: Bernese Mountain Dog
- 240: Appenzeller Sennenhund
- 241: Entlebucher Sennenhund
- 242: Boxer
- 243: Bullmastiff
- 244: Tibetan Mastiff
- 245: French Bulldog
- 246: Great Dane
- 247: St. Bernard
- 248: husky
- 249: Alaskan Malamute
- 250: Siberian Husky
- 251: Dalmatian
- 252: Affenpinscher
- 253: Basenji
- 254: pug
- 255: Leonberger
- 256: Newfoundland
- 257: Pyrenean Mountain Dog
- 258: Samoyed
- 259: Pomeranian
- 260: Chow Chow
- 261: Keeshond
- 262: Griffon Bruxellois
- 263: Pembroke Welsh Corgi
- 264: Cardigan Welsh Corgi
- 265: Toy Poodle
- 266: Miniature Poodle
- 267: Standard Poodle
- 268: Mexican hairless dog
- 269: grey wolf
- 270: Alaskan tundra wolf
- 271: red wolf
- 272: coyote
- 273: dingo
- 274: dhole
- 275: African wild dog
- 276: hyena
- 277: red fox
- 278: kit fox
- 279: Arctic fox
- 280: grey fox
- 281: tabby cat
- 282: tiger cat
- 283: Persian cat
- 284: Siamese cat
- 285: Egyptian Mau
- 286: cougar
- 287: lynx
- 288: leopard
- 289: snow leopard
- 290: jaguar
- 291: lion
- 292: tiger
- 293: cheetah
- 294: brown bear
- 295: American black bear
- 296: polar bear
- 297: sloth bear
- 298: mongoose
- 299: meerkat
- 300: tiger beetle
- 301: ladybug
- 302: ground beetle
- 303: longhorn beetle
- 304: leaf beetle
- 305: dung beetle
- 306: rhinoceros beetle
- 307: weevil
- 308: fly
- 309: bee
- 310: ant
- 311: grasshopper
- 312: cricket
- 313: stick insect
- 314: cockroach
- 315: mantis
- 316: cicada
- 317: leafhopper
- 318: lacewing
- 319: dragonfly
- 320: damselfly
- 321: red admiral
- 322: ringlet
- 323: monarch butterfly
- 324: small white
- 325: sulphur butterfly
- 326: gossamer-winged butterfly
- 327: starfish
- 328: sea urchin
- 329: sea cucumber
- 330: cottontail rabbit
- 331: hare
- 332: Angora rabbit
- 333: hamster
- 334: porcupine
- 335: fox squirrel
- 336: marmot
- 337: beaver
- 338: guinea pig
- 339: common sorrel
- 340: zebra
- 341: pig
- 342: wild boar
- 343: warthog
- 344: hippopotamus
- 345: ox
- 346: water buffalo
- 347: bison
- 348: ram
- 349: bighorn sheep
- 350: Alpine ibex
- 351: hartebeest
- 352: impala
- 353: gazelle
- 354: dromedary
- 355: llama
- 356: weasel
- 357: mink
- 358: European polecat
- 359: black-footed ferret
- 360: otter
- 361: skunk
- 362: badger
- 363: armadillo
- 364: three-toed sloth
- 365: orangutan
- 366: gorilla
- 367: chimpanzee
- 368: gibbon
- 369: siamang
- 370: guenon
- 371: patas monkey
- 372: baboon
- 373: macaque
- 374: langur
- 375: black-and-white colobus
- 376: proboscis monkey
- 377: marmoset
- 378: white-headed capuchin
- 379: howler monkey
- 380: titi
- 381: Geoffroy's spider monkey
- 382: common squirrel monkey
- 383: ring-tailed lemur
- 384: indri
- 385: Asian elephant
- 386: African bush elephant
- 387: red panda
- 388: giant panda
- 389: snoek
- 390: eel
- 391: coho salmon
- 392: rock beauty
- 393: clownfish
- 394: sturgeon
- 395: garfish
- 396: lionfish
- 397: pufferfish
- 398: abacus
- 399: abaya
- 400: academic gown
- 401: accordion
- 402: acoustic guitar
- 403: aircraft carrier
- 404: airliner
- 405: airship
- 406: altar
- 407: ambulance
- 408: amphibious vehicle
- 409: analog clock
- 410: apiary
- 411: apron
- 412: waste container
- 413: assault rifle
- 414: backpack
- 415: bakery
- 416: balance beam
- 417: balloon
- 418: ballpoint pen
- 419: Band-Aid
- 420: banjo
- 421: baluster
- 422: barbell
- 423: barber chair
- 424: barbershop
- 425: barn
- 426: barometer
- 427: barrel
- 428: wheelbarrow
- 429: baseball
- 430: basketball
- 431: bassinet
- 432: bassoon
- 433: swimming cap
- 434: bath towel
- 435: bathtub
- 436: station wagon
- 437: lighthouse
- 438: beaker
- 439: military cap
- 440: beer bottle
- 441: beer glass
- 442: bell-cot
- 443: bib
- 444: tandem bicycle
- 445: bikini
- 446: ring binder
- 447: binoculars
- 448: birdhouse
- 449: boathouse
- 450: bobsleigh
- 451: bolo tie
- 452: poke bonnet
- 453: bookcase
- 454: bookstore
- 455: bottle cap
- 456: bow
- 457: bow tie
- 458: brass
- 459: bra
- 460: breakwater
- 461: breastplate
- 462: broom
- 463: bucket
- 464: buckle
- 465: bulletproof vest
- 466: high-speed train
- 467: butcher shop
- 468: taxicab
- 469: cauldron
- 470: candle
- 471: cannon
- 472: canoe
- 473: can opener
- 474: cardigan
- 475: car mirror
- 476: carousel
- 477: tool kit
- 478: carton
- 479: car wheel
- 480: automated teller machine
- 481: cassette
- 482: cassette player
- 483: castle
- 484: catamaran
- 485: CD player
- 486: cello
- 487: mobile phone
- 488: chain
- 489: chain-link fence
- 490: chain mail
- 491: chainsaw
- 492: chest
- 493: chiffonier
- 494: chime
- 495: china cabinet
- 496: Christmas stocking
- 497: church
- 498: movie theater
- 499: cleaver
- 500: cliff dwelling
- 501: cloak
- 502: clogs
- 503: cocktail shaker
- 504: coffee mug
- 505: coffeemaker
- 506: coil
- 507: combination lock
- 508: computer keyboard
- 509: confectionery store
- 510: container ship
- 511: convertible
- 512: corkscrew
- 513: cornet
- 514: cowboy boot
- 515: cowboy hat
- 516: cradle
- 517: crane (machine)
- 518: crash helmet
- 519: crate
- 520: infant bed
- 521: Crock Pot
- 522: croquet ball
- 523: crutch
- 524: cuirass
- 525: dam
- 526: desk
- 527: desktop computer
- 528: rotary dial telephone
- 529: diaper
- 530: digital clock
- 531: digital watch
- 532: dining table
- 533: dishcloth
- 534: dishwasher
- 535: disc brake
- 536: dock
- 537: dog sled
- 538: dome
- 539: doormat
- 540: drilling rig
- 541: drum
- 542: drumstick
- 543: dumbbell
- 544: Dutch oven
- 545: electric fan
- 546: electric guitar
- 547: electric locomotive
- 548: entertainment center
- 549: envelope
- 550: espresso machine
- 551: face powder
- 552: feather boa
- 553: filing cabinet
- 554: fireboat
- 555: fire engine
- 556: fire screen sheet
- 557: flagpole
- 558: flute
- 559: folding chair
- 560: football helmet
- 561: forklift
- 562: fountain
- 563: fountain pen
- 564: four-poster bed
- 565: freight car
- 566: French horn
- 567: frying pan
- 568: fur coat
- 569: garbage truck
- 570: gas mask
- 571: gas pump
- 572: goblet
- 573: go-kart
- 574: golf ball
- 575: golf cart
- 576: gondola
- 577: gong
- 578: gown
- 579: grand piano
- 580: greenhouse
- 581: grille
- 582: grocery store
- 583: guillotine
- 584: barrette
- 585: hair spray
- 586: half-track
- 587: hammer
- 588: hamper
- 589: hair dryer
- 590: hand-held computer
- 591: handkerchief
- 592: hard disk drive
- 593: harmonica
- 594: harp
- 595: harvester
- 596: hatchet
- 597: holster
- 598: home theater
- 599: honeycomb
- 600: hook
- 601: hoop skirt
- 602: horizontal bar
- 603: horse-drawn vehicle
- 604: hourglass
- 605: iPod
- 606: clothes iron
- 607: jack-o'-lantern
- 608: jeans
- 609: jeep
- 610: T-shirt
- 611: jigsaw puzzle
- 612: pulled rickshaw
- 613: joystick
- 614: kimono
- 615: knee pad
- 616: knot
- 617: lab coat
- 618: ladle
- 619: lampshade
- 620: laptop computer
- 621: lawn mower
- 622: lens cap
- 623: paper knife
- 624: library
- 625: lifeboat
- 626: lighter
- 627: limousine
- 628: ocean liner
- 629: lipstick
- 630: slip-on shoe
- 631: lotion
- 632: speaker
- 633: loupe
- 634: sawmill
- 635: magnetic compass
- 636: mail bag
- 637: mailbox
- 638: tights
- 639: tank suit
- 640: manhole cover
- 641: maraca
- 642: marimba
- 643: mask
- 644: match
- 645: maypole
- 646: maze
- 647: measuring cup
- 648: medicine chest
- 649: megalith
- 650: microphone
- 651: microwave oven
- 652: military uniform
- 653: milk can
- 654: minibus
- 655: miniskirt
- 656: minivan
- 657: missile
- 658: mitten
- 659: mixing bowl
- 660: mobile home
- 661: Model T
- 662: modem
- 663: monastery
- 664: monitor
- 665: moped
- 666: mortar
- 667: square academic cap
- 668: mosque
- 669: mosquito net
- 670: scooter
- 671: mountain bike
- 672: tent
- 673: computer mouse
- 674: mousetrap
- 675: moving van
- 676: muzzle
- 677: nail
- 678: neck brace
- 679: necklace
- 680: nipple
- 681: notebook computer
- 682: obelisk
- 683: oboe
- 684: ocarina
- 685: odometer
- 686: oil filter
- 687: organ
- 688: oscilloscope
- 689: overskirt
- 690: bullock cart
- 691: oxygen mask
- 692: packet
- 693: paddle
- 694: paddle wheel
- 695: padlock
- 696: paintbrush
- 697: pajamas
- 698: palace
- 699: pan flute
- 700: paper towel
- 701: parachute
- 702: parallel bars
- 703: park bench
- 704: parking meter
- 705: passenger car
- 706: patio
- 707: payphone
- 708: pedestal
- 709: pencil case
- 710: pencil sharpener
- 711: perfume
- 712: Petri dish
- 713: photocopier
- 714: plectrum
- 715: Pickelhaube
- 716: picket fence
- 717: pickup truck
- 718: pier
- 719: piggy bank
- 720: pill bottle
- 721: pillow
- 722: ping-pong ball
- 723: pinwheel
- 724: pirate ship
- 725: pitcher
- 726: hand plane
- 727: planetarium
- 728: plastic bag
- 729: plate rack
- 730: plow
- 731: plunger
- 732: Polaroid camera
- 733: pole
- 734: police van
- 735: poncho
- 736: billiard table
- 737: soda bottle
- 738: pot
- 739: potter's wheel
- 740: power drill
- 741: prayer rug
- 742: printer
- 743: prison
- 744: projectile
- 745: projector
- 746: hockey puck
- 747: punching bag
- 748: purse
- 749: quill
- 750: quilt
- 751: race car
- 752: racket
- 753: radiator
- 754: radio
- 755: radio telescope
- 756: rain barrel
- 757: recreational vehicle
- 758: reel
- 759: reflex camera
- 760: refrigerator
- 761: remote control
- 762: restaurant
- 763: revolver
- 764: rifle
- 765: rocking chair
- 766: rotisserie
- 767: eraser
- 768: rugby ball
- 769: ruler
- 770: running shoe
- 771: safe
- 772: safety pin
- 773: salt shaker
- 774: sandal
- 775: sarong
- 776: saxophone
- 777: scabbard
- 778: weighing scale
- 779: school bus
- 780: schooner
- 781: scoreboard
- 782: CRT screen
- 783: screw
- 784: screwdriver
- 785: seat belt
- 786: sewing machine
- 787: shield
- 788: shoe store
- 789: shoji
- 790: shopping basket
- 791: shopping cart
- 792: shovel
- 793: shower cap
- 794: shower curtain
- 795: ski
- 796: ski mask
- 797: sleeping bag
- 798: slide rule
- 799: sliding door
- 800: slot machine
- 801: snorkel
- 802: snowmobile
- 803: snowplow
- 804: soap dispenser
- 805: soccer ball
- 806: sock
- 807: solar thermal collector
- 808: sombrero
- 809: soup bowl
- 810: space bar
- 811: space heater
- 812: space shuttle
- 813: spatula
- 814: motorboat
- 815: spider web
- 816: spindle
- 817: sports car
- 818: spotlight
- 819: stage
- 820: steam locomotive
- 821: through arch bridge
- 822: steel drum
- 823: stethoscope
- 824: scarf
- 825: stone wall
- 826: stopwatch
- 827: stove
- 828: strainer
- 829: tram
- 830: stretcher
- 831: couch
- 832: stupa
- 833: submarine
- 834: suit
- 835: sundial
- 836: sunglass
- 837: sunglasses
- 838: sunscreen
- 839: suspension bridge
- 840: mop
- 841: sweatshirt
- 842: swimsuit
- 843: swing
- 844: switch
- 845: syringe
- 846: table lamp
- 847: tank
- 848: tape player
- 849: teapot
- 850: teddy bear
- 851: television
- 852: tennis ball
- 853: thatched roof
- 854: front curtain
- 855: thimble
- 856: threshing machine
- 857: throne
- 858: tile roof
- 859: toaster
- 860: tobacco shop
- 861: toilet seat
- 862: torch
- 863: totem pole
- 864: tow truck
- 865: toy store
- 866: tractor
- 867: semi-trailer truck
- 868: tray
- 869: trench coat
- 870: tricycle
- 871: trimaran
- 872: tripod
- 873: triumphal arch
- 874: trolleybus
- 875: trombone
- 876: tub
- 877: turnstile
- 878: typewriter keyboard
- 879: umbrella
- 880: unicycle
- 881: upright piano
- 882: vacuum cleaner
- 883: vase
- 884: vault
- 885: velvet
- 886: vending machine
- 887: vestment
- 888: viaduct
- 889: violin
- 890: volleyball
- 891: waffle iron
- 892: wall clock
- 893: wallet
- 894: wardrobe
- 895: military aircraft
- 896: sink
- 897: washing machine
- 898: water bottle
- 899: water jug
- 900: water tower
- 901: whiskey jug
- 902: whistle
- 903: wig
- 904: window screen
- 905: window shade
- 906: Windsor tie
- 907: wine bottle
- 908: wing
- 909: wok
- 910: wooden spoon
- 911: wool
- 912: split-rail fence
- 913: shipwreck
- 914: yawl
- 915: yurt
- 916: website
- 917: comic book
- 918: crossword
- 919: traffic sign
- 920: traffic light
- 921: dust jacket
- 922: menu
- 923: plate
- 924: guacamole
- 925: consomme
- 926: hot pot
- 927: trifle
- 928: ice cream
- 929: ice pop
- 930: baguette
- 931: bagel
- 932: pretzel
- 933: cheeseburger
- 934: hot dog
- 935: mashed potato
- 936: cabbage
- 937: broccoli
- 938: cauliflower
- 939: zucchini
- 940: spaghetti squash
- 941: acorn squash
- 942: butternut squash
- 943: cucumber
- 944: artichoke
- 945: bell pepper
- 946: cardoon
- 947: mushroom
- 948: Granny Smith
- 949: strawberry
- 950: orange
- 951: lemon
- 952: fig
- 953: pineapple
- 954: banana
- 955: jackfruit
- 956: custard apple
- 957: pomegranate
- 958: hay
- 959: carbonara
- 960: chocolate syrup
- 961: dough
- 962: meatloaf
- 963: pizza
- 964: pot pie
- 965: burrito
- 966: red wine
- 967: espresso
- 968: cup
- 969: eggnog
- 970: alp
- 971: bubble
- 972: cliff
- 973: coral reef
- 974: geyser
- 975: lakeshore
- 976: promontory
- 977: shoal
- 978: seashore
- 979: valley
- 980: volcano
- 981: baseball player
- 982: bridegroom
- 983: scuba diver
- 984: rapeseed
- 985: daisy
- 986: yellow lady's slipper
- 987: corn
- 988: acorn
- 989: rose hip
- 990: horse chestnut seed
- 991: coral fungus
- 992: agaric
- 993: gyromitra
- 994: stinkhorn mushroom
- 995: earth star
- 996: hen-of-the-woods
- 997: bolete
- 998: ear
- 999: toilet paper
-
-# Download script/URL (optional)
-download: data/scripts/get_imagenet1000.sh
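
Since the three ImageNet variants differ only in class count, dataset root, and download script, a subset YAML could be generated from the full file instead of maintained by hand — a sketch (the output filename is illustrative):

import yaml

with open("ImageNet1000.yaml") as f:  # local copy of the file deleted above
    full = yaml.safe_load(f)

n = 100  # subset size
subset = dict(full,
              path=f"../datasets/imagenet{n}",
              names={i: full["names"][i] for i in range(n)},
              download=f"data/scripts/get_imagenet{n}.sh")
with open(f"ImageNet{n}.generated.yaml", "w") as f:
    yaml.safe_dump(subset, f, sort_keys=False)
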
diff --git a/yolov5/data/Objects365.yaml b/yolov5/data/Objects365.yaml
deleted file mode 100644
index 248b6c7..0000000
--- a/yolov5/data/Objects365.yaml
+++ /dev/null
@@ -1,437 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# Objects365 dataset https://www.objects365.org/ by Megvii
-# Example usage: python train.py --data Objects365.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/Objects365 # dataset root dir
-train: images/train # train images (relative to 'path') 1742289 images
-val: images/val # val images (relative to 'path') 80000 images
-test: # test images (optional)
-
-# Classes
-names:
- 0: Person
- 1: Sneakers
- 2: Chair
- 3: Other Shoes
- 4: Hat
- 5: Car
- 6: Lamp
- 7: Glasses
- 8: Bottle
- 9: Desk
- 10: Cup
- 11: Street Lights
- 12: Cabinet/shelf
- 13: Handbag/Satchel
- 14: Bracelet
- 15: Plate
- 16: Picture/Frame
- 17: Helmet
- 18: Book
- 19: Gloves
- 20: Storage box
- 21: Boat
- 22: Leather Shoes
- 23: Flower
- 24: Bench
- 25: Potted Plant
- 26: Bowl/Basin
- 27: Flag
- 28: Pillow
- 29: Boots
- 30: Vase
- 31: Microphone
- 32: Necklace
- 33: Ring
- 34: SUV
- 35: Wine Glass
- 36: Belt
- 37: Monitor/TV
- 38: Backpack
- 39: Umbrella
- 40: Traffic Light
- 41: Speaker
- 42: Watch
- 43: Tie
- 44: Trash bin Can
- 45: Slippers
- 46: Bicycle
- 47: Stool
- 48: Barrel/bucket
- 49: Van
- 50: Couch
- 51: Sandals
- 52: Basket
- 53: Drum
- 54: Pen/Pencil
- 55: Bus
- 56: Wild Bird
- 57: High Heels
- 58: Motorcycle
- 59: Guitar
- 60: Carpet
- 61: Cell Phone
- 62: Bread
- 63: Camera
- 64: Canned
- 65: Truck
- 66: Traffic cone
- 67: Cymbal
- 68: Lifesaver
- 69: Towel
- 70: Stuffed Toy
- 71: Candle
- 72: Sailboat
- 73: Laptop
- 74: Awning
- 75: Bed
- 76: Faucet
- 77: Tent
- 78: Horse
- 79: Mirror
- 80: Power outlet
- 81: Sink
- 82: Apple
- 83: Air Conditioner
- 84: Knife
- 85: Hockey Stick
- 86: Paddle
- 87: Pickup Truck
- 88: Fork
- 89: Traffic Sign
- 90: Balloon
- 91: Tripod
- 92: Dog
- 93: Spoon
- 94: Clock
- 95: Pot
- 96: Cow
- 97: Cake
- 98: Dinning Table
- 99: Sheep
- 100: Hanger
- 101: Blackboard/Whiteboard
- 102: Napkin
- 103: Other Fish
- 104: Orange/Tangerine
- 105: Toiletry
- 106: Keyboard
- 107: Tomato
- 108: Lantern
- 109: Machinery Vehicle
- 110: Fan
- 111: Green Vegetables
- 112: Banana
- 113: Baseball Glove
- 114: Airplane
- 115: Mouse
- 116: Train
- 117: Pumpkin
- 118: Soccer
- 119: Skiboard
- 120: Luggage
- 121: Nightstand
- 122: Tea pot
- 123: Telephone
- 124: Trolley
- 125: Head Phone
- 126: Sports Car
- 127: Stop Sign
- 128: Dessert
- 129: Scooter
- 130: Stroller
- 131: Crane
- 132: Remote
- 133: Refrigerator
- 134: Oven
- 135: Lemon
- 136: Duck
- 137: Baseball Bat
- 138: Surveillance Camera
- 139: Cat
- 140: Jug
- 141: Broccoli
- 142: Piano
- 143: Pizza
- 144: Elephant
- 145: Skateboard
- 146: Surfboard
- 147: Gun
- 148: Skating and Skiing shoes
- 149: Gas stove
- 150: Donut
- 151: Bow Tie
- 152: Carrot
- 153: Toilet
- 154: Kite
- 155: Strawberry
- 156: Other Balls
- 157: Shovel
- 158: Pepper
- 159: Computer Box
- 160: Toilet Paper
- 161: Cleaning Products
- 162: Chopsticks
- 163: Microwave
- 164: Pigeon
- 165: Baseball
- 166: Cutting/chopping Board
- 167: Coffee Table
- 168: Side Table
- 169: Scissors
- 170: Marker
- 171: Pie
- 172: Ladder
- 173: Snowboard
- 174: Cookies
- 175: Radiator
- 176: Fire Hydrant
- 177: Basketball
- 178: Zebra
- 179: Grape
- 180: Giraffe
- 181: Potato
- 182: Sausage
- 183: Tricycle
- 184: Violin
- 185: Egg
- 186: Fire Extinguisher
- 187: Candy
- 188: Fire Truck
- 189: Billiards
- 190: Converter
- 191: Bathtub
- 192: Wheelchair
- 193: Golf Club
- 194: Briefcase
- 195: Cucumber
- 196: Cigar/Cigarette
- 197: Paint Brush
- 198: Pear
- 199: Heavy Truck
- 200: Hamburger
- 201: Extractor
- 202: Extension Cord
- 203: Tong
- 204: Tennis Racket
- 205: Folder
- 206: American Football
- 207: earphone
- 208: Mask
- 209: Kettle
- 210: Tennis
- 211: Ship
- 212: Swing
- 213: Coffee Machine
- 214: Slide
- 215: Carriage
- 216: Onion
- 217: Green beans
- 218: Projector
- 219: Frisbee
- 220: Washing Machine/Drying Machine
- 221: Chicken
- 222: Printer
- 223: Watermelon
- 224: Saxophone
- 225: Tissue
- 226: Toothbrush
- 227: Ice cream
- 228: Hot-air balloon
- 229: Cello
- 230: French Fries
- 231: Scale
- 232: Trophy
- 233: Cabbage
- 234: Hot dog
- 235: Blender
- 236: Peach
- 237: Rice
- 238: Wallet/Purse
- 239: Volleyball
- 240: Deer
- 241: Goose
- 242: Tape
- 243: Tablet
- 244: Cosmetics
- 245: Trumpet
- 246: Pineapple
- 247: Golf Ball
- 248: Ambulance
- 249: Parking meter
- 250: Mango
- 251: Key
- 252: Hurdle
- 253: Fishing Rod
- 254: Medal
- 255: Flute
- 256: Brush
- 257: Penguin
- 258: Megaphone
- 259: Corn
- 260: Lettuce
- 261: Garlic
- 262: Swan
- 263: Helicopter
- 264: Green Onion
- 265: Sandwich
- 266: Nuts
- 267: Speed Limit Sign
- 268: Induction Cooker
- 269: Broom
- 270: Trombone
- 271: Plum
- 272: Rickshaw
- 273: Goldfish
- 274: Kiwi fruit
- 275: Router/modem
- 276: Poker Card
- 277: Toaster
- 278: Shrimp
- 279: Sushi
- 280: Cheese
- 281: Notepaper
- 282: Cherry
- 283: Pliers
- 284: CD
- 285: Pasta
- 286: Hammer
- 287: Cue
- 288: Avocado
- 289: Hamimelon
- 290: Flask
- 291: Mushroom
- 292: Screwdriver
- 293: Soap
- 294: Recorder
- 295: Bear
- 296: Eggplant
- 297: Board Eraser
- 298: Coconut
- 299: Tape Measure/Ruler
- 300: Pig
- 301: Showerhead
- 302: Globe
- 303: Chips
- 304: Steak
- 305: Crosswalk Sign
- 306: Stapler
- 307: Camel
- 308: Formula 1
- 309: Pomegranate
- 310: Dishwasher
- 311: Crab
- 312: Hoverboard
- 313: Meat ball
- 314: Rice Cooker
- 315: Tuba
- 316: Calculator
- 317: Papaya
- 318: Antelope
- 319: Parrot
- 320: Seal
- 321: Butterfly
- 322: Dumbbell
- 323: Donkey
- 324: Lion
- 325: Urinal
- 326: Dolphin
- 327: Electric Drill
- 328: Hair Dryer
- 329: Egg tart
- 330: Jellyfish
- 331: Treadmill
- 332: Lighter
- 333: Grapefruit
- 334: Game board
- 335: Mop
- 336: Radish
- 337: Baozi
- 338: Target
- 339: French
- 340: Spring Rolls
- 341: Monkey
- 342: Rabbit
- 343: Pencil Case
- 344: Yak
- 345: Red Cabbage
- 346: Binoculars
- 347: Asparagus
- 348: Barbell
- 349: Scallop
- 350: Noddles
- 351: Comb
- 352: Dumpling
- 353: Oyster
- 354: Table Tennis paddle
- 355: Cosmetics Brush/Eyeliner Pencil
- 356: Chainsaw
- 357: Eraser
- 358: Lobster
- 359: Durian
- 360: Okra
- 361: Lipstick
- 362: Cosmetics Mirror
- 363: Curling
- 364: Table Tennis
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- from tqdm import tqdm
-
- from utils.general import Path, check_requirements, download, np, xyxy2xywhn
-
- check_requirements('pycocotools>=2.0')
- from pycocotools.coco import COCO
-
- # Make Directories
- dir = Path(yaml['path']) # dataset root dir
- for p in 'images', 'labels':
- (dir / p).mkdir(parents=True, exist_ok=True)
- for q in 'train', 'val':
- (dir / p / q).mkdir(parents=True, exist_ok=True)
-
- # Train, Val Splits
- for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
- print(f"Processing {split} in {patches} patches ...")
- images, labels = dir / 'images' / split, dir / 'labels' / split
-
- # Download
- url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
- if split == 'train':
- download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json
- download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
- elif split == 'val':
- download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
- download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
- download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
-
- # Move
- for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
- f.rename(images / f.name) # move to /images/{split}
-
- # Labels
- coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
- names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
- for cid, cat in enumerate(names):
- catIds = coco.getCatIds(catNms=[cat])
- imgIds = coco.getImgIds(catIds=catIds)
- for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
- width, height = im["width"], im["height"]
- path = Path(im["file_name"]) # image filename
- try:
- with open(labels / path.with_suffix('.txt').name, 'a') as file:
- annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=False)
- for a in coco.loadAnns(annIds):
- x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
- xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
- x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
- file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
- except Exception as e:
- print(e)
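
The label-writing loop above hinges on xyxy2xywhn from utils.general: corner coordinates in pixels go in, a normalized center/width/height box comes out. A self-contained NumPy equivalent in spirit (YOLOv5's version also accepts an eps for clipping), so the math can be checked in isolation:

import numpy as np

def xyxy2xywhn(xyxy, w, h, clip=False):
    # (x1, y1, x2, y2) in pixels -> (xc, yc, bw, bh) normalized to [0, 1]
    xyxy = np.asarray(xyxy, dtype=float).copy()
    if clip:
        xyxy[:, [0, 2]] = xyxy[:, [0, 2]].clip(0, w)
        xyxy[:, [1, 3]] = xyxy[:, [1, 3]].clip(0, h)
    out = np.empty_like(xyxy)
    out[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2 / w  # x center
    out[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2 / h  # y center
    out[:, 2] = (xyxy[:, 2] - xyxy[:, 0]) / w      # width
    out[:, 3] = (xyxy[:, 3] - xyxy[:, 1]) / h      # height
    return out

print(xyxy2xywhn([[10, 20, 110, 220]], w=200, h=400))  # [[0.3 0.3 0.5 0.5]]
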
diff --git a/yolov5/data/SKU-110K.yaml b/yolov5/data/SKU-110K.yaml
deleted file mode 100644
index 695b89c..0000000
--- a/yolov5/data/SKU-110K.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
-# Example usage: python train.py --data SKU-110K.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── SKU-110K ← downloads here (13.6 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/SKU-110K # dataset root dir
-train: train.txt # train images (relative to 'path') 8219 images
-val: val.txt # val images (relative to 'path') 588 images
-test: test.txt # test images (optional) 2936 images
-
-# Classes
-names:
- 0: object
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- import shutil
- from tqdm import tqdm
- from utils.general import np, pd, Path, download, xyxy2xywh
-
-
- # Download
- dir = Path(yaml['path']) # dataset root dir
- parent = Path(dir.parent) # download dir
- urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
- download(urls, dir=parent, delete=False)
-
- # Rename directories
- if dir.exists():
- shutil.rmtree(dir)
- (parent / 'SKU110K_fixed').rename(dir) # rename dir
- (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
-
- # Convert labels
- names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
- for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
- x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
- images, unique_images = x[:, 0], np.unique(x[:, 0])
- with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
- f.writelines(f'./images/{s}\n' for s in unique_images)
- for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
- cls = 0 # single-class dataset
- with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
- for r in x[images == im]:
- w, h = r[6], r[7] # image width, height
- xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
- f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
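
The CSV conversion above emits standard YOLO label files: one "class xc yc w h" row per box, every value normalized by the image size. Reading one back for a spot check (the path is illustrative):

from pathlib import Path

def read_yolo_labels(path):
    """Parse one YOLO label .txt into (class_id, xc, yc, w, h) tuples."""
    rows = []
    for line in Path(path).read_text().splitlines():
        c, *box = line.split()
        rows.append((int(c), *map(float, box)))
    return rows

# e.g. read_yolo_labels("../datasets/SKU-110K/labels/<image name>.txt")
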
diff --git a/yolov5/data/VOC.yaml b/yolov5/data/VOC.yaml
deleted file mode 100644
index 9dad477..0000000
--- a/yolov5/data/VOC.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
-# Example usage: python train.py --data VOC.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── VOC ← downloads here (2.8 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/VOC
-train: # train images (relative to 'path') 16551 images
- - images/train2012
- - images/train2007
- - images/val2012
- - images/val2007
-val: # val images (relative to 'path') 4952 images
- - images/test2007
-test: # test images (optional)
- - images/test2007
-
-# Classes
-names:
- 0: aeroplane
- 1: bicycle
- 2: bird
- 3: boat
- 4: bottle
- 5: bus
- 6: car
- 7: cat
- 8: chair
- 9: cow
- 10: diningtable
- 11: dog
- 12: horse
- 13: motorbike
- 14: person
- 15: pottedplant
- 16: sheep
- 17: sofa
- 18: train
- 19: tvmonitor
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- import xml.etree.ElementTree as ET
-
- from tqdm import tqdm
- from utils.general import download, Path
-
-
- def convert_label(path, lb_path, year, image_id):
- def convert_box(size, box):
- dw, dh = 1. / size[0], 1. / size[1]
- x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
- return x * dw, y * dh, w * dw, h * dh
-
- in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
- out_file = open(lb_path, 'w')
- tree = ET.parse(in_file)
- root = tree.getroot()
- size = root.find('size')
- w = int(size.find('width').text)
- h = int(size.find('height').text)
-
- names = list(yaml['names'].values()) # names list
- for obj in root.iter('object'):
- cls = obj.find('name').text
- if cls in names and int(obj.find('difficult').text) != 1:
- xmlbox = obj.find('bndbox')
- bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
- cls_id = names.index(cls) # class id
- out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
-
-
- # Download
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
- f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
- f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
- download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
-
- # Convert
- path = dir / 'images/VOCdevkit'
- for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
- imgs_path = dir / 'images' / f'{image_set}{year}'
- lbs_path = dir / 'labels' / f'{image_set}{year}'
- imgs_path.mkdir(exist_ok=True, parents=True)
- lbs_path.mkdir(exist_ok=True, parents=True)
-
- with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
- image_ids = f.read().strip().split()
- for id in tqdm(image_ids, desc=f'{image_set}{year}'):
- f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
- lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
- f.rename(imgs_path / f.name) # move image
- convert_label(path, lb_path, year, id) # convert labels to YOLO format
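
convert_box above receives VOC corners in the axis-grouped order (xmin, xmax, ymin, ymax) that the caller assembles, and the "- 1" shifts VOC's 1-based pixel coordinates before normalizing. A standalone worked check on a 640x480 image:

def convert_box(size, box):
    # size = (width, height); box = (xmin, xmax, ymin, ymax) in 1-based VOC pixels
    dw, dh = 1. / size[0], 1. / size[1]
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w, h = box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh

print(convert_box((640, 480), (100.0, 300.0, 120.0, 360.0)))
# (0.3109375, 0.4979166..., 0.3125, 0.5)
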
diff --git a/yolov5/data/VisDrone.yaml b/yolov5/data/VisDrone.yaml
deleted file mode 100644
index 637433b..0000000
--- a/yolov5/data/VisDrone.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
-# Example usage: python train.py --data VisDrone.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── VisDrone ← downloads here (2.3 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/VisDrone # dataset root dir
-train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
-val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
-test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
-
-# Classes
-names:
- 0: pedestrian
- 1: people
- 2: bicycle
- 3: car
- 4: van
- 5: truck
- 6: tricycle
- 7: awning-tricycle
- 8: bus
- 9: motor
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- from utils.general import download, os, Path
-
- def visdrone2yolo(dir):
- from PIL import Image
- from tqdm import tqdm
-
- def convert_box(size, box):
- # Convert VisDrone box to YOLO xywh box
- dw = 1. / size[0]
- dh = 1. / size[1]
- return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
-
- (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
- pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
- for f in pbar:
- img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
- lines = []
- with open(f, 'r') as file: # read annotation.txt
- for row in [x.split(',') for x in file.read().strip().splitlines()]:
- if row[4] == '0': # VisDrone 'ignored regions' class 0
- continue
- cls = int(row[5]) - 1
- box = convert_box(img_size, tuple(map(int, row[:4])))
- lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
- with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
- fl.writelines(lines) # write label.txt
-
-
- # Download
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip']
- download(urls, dir=dir, curl=True, threads=4)
-
- # Convert
- for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
- visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
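
Each VisDrone annotation row is comma-separated "x,y,w,h,score,category,..." with a top-left pixel origin; the converter above drops "ignored regions" (fifth field == '0') and shifts the 1-based category down by one. The per-row logic, isolated for testing:

def visdrone_row_to_yolo(row, img_w, img_h):
    """row: str fields of one annotation line -> (cls, xc, yc, w, h) or None."""
    if row[4] == '0':  # VisDrone 'ignored regions'
        return None
    x, y, w, h = map(int, row[:4])
    cls = int(row[5]) - 1  # VisDrone categories are 1-based
    return (cls, (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h)

print(visdrone_row_to_yolo("100,50,40,80,1,2,0,0".split(','), 800, 600))
# (1, 0.15, 0.15, 0.05, 0.13333333333333333)
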
diff --git a/yolov5/data/coco.yaml b/yolov5/data/coco.yaml
deleted file mode 100644
index 7f872e8..0000000
--- a/yolov5/data/coco.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# COCO 2017 dataset http://cocodataset.org by Microsoft
-# Example usage: python train.py --data coco.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── coco ← downloads here (20.1 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco # dataset root dir
-train: train2017.txt # train images (relative to 'path') 118287 images
-val: val2017.txt # val images (relative to 'path') 5000 images
-test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
-
-# Classes
-names:
- 0: person
- 1: bicycle
- 2: car
- 3: motorcycle
- 4: airplane
- 5: bus
- 6: train
- 7: truck
- 8: boat
- 9: traffic light
- 10: fire hydrant
- 11: stop sign
- 12: parking meter
- 13: bench
- 14: bird
- 15: cat
- 16: dog
- 17: horse
- 18: sheep
- 19: cow
- 20: elephant
- 21: bear
- 22: zebra
- 23: giraffe
- 24: backpack
- 25: umbrella
- 26: handbag
- 27: tie
- 28: suitcase
- 29: frisbee
- 30: skis
- 31: snowboard
- 32: sports ball
- 33: kite
- 34: baseball bat
- 35: baseball glove
- 36: skateboard
- 37: surfboard
- 38: tennis racket
- 39: bottle
- 40: wine glass
- 41: cup
- 42: fork
- 43: knife
- 44: spoon
- 45: bowl
- 46: banana
- 47: apple
- 48: sandwich
- 49: orange
- 50: broccoli
- 51: carrot
- 52: hot dog
- 53: pizza
- 54: donut
- 55: cake
- 56: chair
- 57: couch
- 58: potted plant
- 59: bed
- 60: dining table
- 61: toilet
- 62: tv
- 63: laptop
- 64: mouse
- 65: remote
- 66: keyboard
- 67: cell phone
- 68: microwave
- 69: oven
- 70: toaster
- 71: sink
- 72: refrigerator
- 73: book
- 74: clock
- 75: vase
- 76: scissors
- 77: teddy bear
- 78: hair drier
- 79: toothbrush
-
-# Download script/URL (optional)
-download: |
- from utils.general import download, Path
-
-
- # Download labels
- segments = False # segment or box labels
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
- download(urls, dir=dir.parent)
-
- # Download data
- urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
- 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
- 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
- download(urls, dir=dir / 'images', threads=3)
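
Two download conventions appear in these files: the ImageNet YAMLs point at a shell script, while the detection YAMLs embed a Python block under "download: |" that YOLOv5 executes with the parsed config bound to a variable named yaml — which is why the inline scripts can reference yaml['path'] without importing anything. A rough sketch of that dispatch (an approximation, not YOLOv5's actual check_dataset):

import subprocess
import yaml as pyyaml  # aliased: inside the exec'd block, 'yaml' is the config dict

def run_download(yaml_path):
    with open(yaml_path) as f:
        data = pyyaml.safe_load(f)
    s = data.get("download")
    if not s:
        return
    if s.endswith(".sh"):
        subprocess.run(["bash", s], check=True)  # shell-script style (ImageNet)
    elif s.startswith("http"):
        print("fetch archive:", s)  # bare-URL style (coco128); fetching left to caller
    else:
        exec(s, {"yaml": data})  # inline Python style (coco, VOC, VisDrone, ...)
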
diff --git a/yolov5/data/coco128-seg.yaml b/yolov5/data/coco128-seg.yaml
deleted file mode 100644
index fa618d8..0000000
--- a/yolov5/data/coco128-seg.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
-# Example usage: python train.py --data coco128.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── coco128-seg ← downloads here (7 MB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco128-seg # dataset root dir
-train: images/train2017 # train images (relative to 'path') 128 images
-val: images/train2017 # val images (relative to 'path') 128 images
-test: # test images (optional)
-
-# Classes
-names:
- 0: person
- 1: bicycle
- 2: car
- 3: motorcycle
- 4: airplane
- 5: bus
- 6: train
- 7: truck
- 8: boat
- 9: traffic light
- 10: fire hydrant
- 11: stop sign
- 12: parking meter
- 13: bench
- 14: bird
- 15: cat
- 16: dog
- 17: horse
- 18: sheep
- 19: cow
- 20: elephant
- 21: bear
- 22: zebra
- 23: giraffe
- 24: backpack
- 25: umbrella
- 26: handbag
- 27: tie
- 28: suitcase
- 29: frisbee
- 30: skis
- 31: snowboard
- 32: sports ball
- 33: kite
- 34: baseball bat
- 35: baseball glove
- 36: skateboard
- 37: surfboard
- 38: tennis racket
- 39: bottle
- 40: wine glass
- 41: cup
- 42: fork
- 43: knife
- 44: spoon
- 45: bowl
- 46: banana
- 47: apple
- 48: sandwich
- 49: orange
- 50: broccoli
- 51: carrot
- 52: hot dog
- 53: pizza
- 54: donut
- 55: cake
- 56: chair
- 57: couch
- 58: potted plant
- 59: bed
- 60: dining table
- 61: toilet
- 62: tv
- 63: laptop
- 64: mouse
- 65: remote
- 66: keyboard
- 67: cell phone
- 68: microwave
- 69: oven
- 70: toaster
- 71: sink
- 72: refrigerator
- 73: book
- 74: clock
- 75: vase
- 76: scissors
- 77: teddy bear
- 78: hair drier
- 79: toothbrush
-
-# Download script/URL (optional)
-download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip
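
coco128-seg shares coco128's images and class list; only the label files differ — each row stores a normalized polygon rather than a box, i.e. "class x1 y1 x2 y2 ... xn yn" as I read the -seg format. Parsing one row:

def read_segment_label(line):
    c, *coords = line.split()
    points = list(zip(map(float, coords[0::2]), map(float, coords[1::2])))
    return int(c), points  # class id, normalized polygon vertices

print(read_segment_label("0 0.1 0.2 0.5 0.2 0.5 0.8 0.1 0.8"))
# (0, [(0.1, 0.2), (0.5, 0.2), (0.5, 0.8), (0.1, 0.8)])
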
diff --git a/yolov5/data/coco128.yaml b/yolov5/data/coco128.yaml
deleted file mode 100644
index e81fb1f..0000000
--- a/yolov5/data/coco128.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
-# Example usage: python train.py --data coco128.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── coco128 ← downloads here (7 MB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/coco128 # dataset root dir
-train: images/train2017 # train images (relative to 'path') 128 images
-val: images/train2017 # val images (relative to 'path') 128 images
-test: # test images (optional)
-
-# Classes
-names:
- 0: person
- 1: bicycle
- 2: car
- 3: motorcycle
- 4: airplane
- 5: bus
- 6: train
- 7: truck
- 8: boat
- 9: traffic light
- 10: fire hydrant
- 11: stop sign
- 12: parking meter
- 13: bench
- 14: bird
- 15: cat
- 16: dog
- 17: horse
- 18: sheep
- 19: cow
- 20: elephant
- 21: bear
- 22: zebra
- 23: giraffe
- 24: backpack
- 25: umbrella
- 26: handbag
- 27: tie
- 28: suitcase
- 29: frisbee
- 30: skis
- 31: snowboard
- 32: sports ball
- 33: kite
- 34: baseball bat
- 35: baseball glove
- 36: skateboard
- 37: surfboard
- 38: tennis racket
- 39: bottle
- 40: wine glass
- 41: cup
- 42: fork
- 43: knife
- 44: spoon
- 45: bowl
- 46: banana
- 47: apple
- 48: sandwich
- 49: orange
- 50: broccoli
- 51: carrot
- 52: hot dog
- 53: pizza
- 54: donut
- 55: cake
- 56: chair
- 57: couch
- 58: potted plant
- 59: bed
- 60: dining table
- 61: toilet
- 62: tv
- 63: laptop
- 64: mouse
- 65: remote
- 66: keyboard
- 67: cell phone
- 68: microwave
- 69: oven
- 70: toaster
- 71: sink
- 72: refrigerator
- 73: book
- 74: clock
- 75: vase
- 76: scissors
- 77: teddy bear
- 78: hair drier
- 79: toothbrush
-
-# Download script/URL (optional)
-download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip
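
coco128 deliberately reuses images/train2017 for both train and val: 128 images a model should be able to overfit, which makes it a fast end-to-end sanity check. Its download field is a bare URL, so fetching it by hand is a few lines:

import urllib.request
import zipfile
from pathlib import Path

url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip"
dst = Path("../datasets")  # parent of the dataset root above
dst.mkdir(parents=True, exist_ok=True)
zip_path = dst / "coco128.zip"
urllib.request.urlretrieve(url, zip_path)
with zipfile.ZipFile(zip_path) as z:
    z.extractall(dst)  # unpacks to ../datasets/coco128
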
diff --git a/yolov5/data/images/bus.jpg b/yolov5/data/images/bus.jpg
deleted file mode 100644
index b43e311..0000000
Binary files a/yolov5/data/images/bus.jpg and /dev/null differ
diff --git a/yolov5/data/images/zidane.jpg b/yolov5/data/images/zidane.jpg
deleted file mode 100644
index 92d72ea..0000000
Binary files a/yolov5/data/images/zidane.jpg and /dev/null differ
diff --git a/yolov5/data/xView.yaml b/yolov5/data/xView.yaml
deleted file mode 100644
index 6bea763..0000000
--- a/yolov5/data/xView.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
-# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
-# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
-# Example usage: python train.py --data xView.yaml
-# parent
-# ├── yolov5
-# └── datasets
-# └── xView ← downloads here (20.7 GB)
-
-# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path: ../datasets/xView # dataset root dir
-train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
-val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
-
-# Classes
-names:
- 0: Fixed-wing Aircraft
- 1: Small Aircraft
- 2: Cargo Plane
- 3: Helicopter
- 4: Passenger Vehicle
- 5: Small Car
- 6: Bus
- 7: Pickup Truck
- 8: Utility Truck
- 9: Truck
- 10: Cargo Truck
- 11: Truck w/Box
- 12: Truck Tractor
- 13: Trailer
- 14: Truck w/Flatbed
- 15: Truck w/Liquid
- 16: Crane Truck
- 17: Railway Vehicle
- 18: Passenger Car
- 19: Cargo Car
- 20: Flat Car
- 21: Tank car
- 22: Locomotive
- 23: Maritime Vessel
- 24: Motorboat
- 25: Sailboat
- 26: Tugboat
- 27: Barge
- 28: Fishing Vessel
- 29: Ferry
- 30: Yacht
- 31: Container Ship
- 32: Oil Tanker
- 33: Engineering Vehicle
- 34: Tower crane
- 35: Container Crane
- 36: Reach Stacker
- 37: Straddle Carrier
- 38: Mobile Crane
- 39: Dump Truck
- 40: Haul Truck
- 41: Scraper/Tractor
- 42: Front loader/Bulldozer
- 43: Excavator
- 44: Cement Mixer
- 45: Ground Grader
- 46: Hut/Tent
- 47: Shed
- 48: Building
- 49: Aircraft Hangar
- 50: Damaged Building
- 51: Facility
- 52: Construction Site
- 53: Vehicle Lot
- 54: Helipad
- 55: Storage Tank
- 56: Shipping container lot
- 57: Shipping Container
- 58: Pylon
- 59: Tower
-
-# Download script/URL (optional) ---------------------------------------------------------------------------------------
-download: |
- import json
- import os
- from pathlib import Path
-
- import numpy as np
- from PIL import Image
- from tqdm import tqdm
-
- from utils.dataloaders import autosplit
- from utils.general import download, xyxy2xywhn
-
-
- def convert_labels(fname=Path('xView/xView_train.geojson')):
- # Convert xView geoJSON labels to YOLO format
- path = fname.parent
- with open(fname) as f:
- print(f'Loading {fname}...')
- data = json.load(f)
-
- # Make dirs
- labels = Path(path / 'labels' / 'train')
- os.system(f'rm -rf {labels}')
- labels.mkdir(parents=True, exist_ok=True)
-
- # xView classes 11-94 to 0-59
- xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
- 12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
- 29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
- 47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
-
- shapes = {}
- for feature in tqdm(data['features'], desc=f'Converting {fname}'):
- p = feature['properties']
- if p['bounds_imcoords']:
- id = p['image_id']
- file = path / 'train_images' / id
- if file.exists(): # 1395.tif missing
- try:
- box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
- assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
- cls = p['type_id']
- cls = xview_class2index[int(cls)] # map xView class id to 0-59
- assert 59 >= cls >= 0, f'incorrect class index {cls}'
-
- # Write YOLO label
- if id not in shapes:
- shapes[id] = Image.open(file).size
- box = xyxy2xywhn(box[None].astype(float), w=shapes[id][0], h=shapes[id][1], clip=True) # np.float was removed in NumPy 1.24
- with open((labels / id).with_suffix('.txt'), 'a') as f:
- f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
- except Exception as e:
- print(f'WARNING: skipping one label for {file}: {e}')
-
-
- # Download manually from https://challenge.xviewdataset.org
- dir = Path(yaml['path']) # dataset root dir
- # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
- # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
- # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
- # download(urls, dir=dir, delete=False)
-
- # Convert labels
- convert_labels(dir / 'xView_train.geojson')
-
- # Move images
- images = Path(dir / 'images')
- images.mkdir(parents=True, exist_ok=True)
- Path(dir / 'train_images').rename(dir / 'images' / 'train')
- Path(dir / 'val_images').rename(dir / 'images' / 'val')
-
- # Split
- autosplit(dir / 'images' / 'train')
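The conversion in the deleted download script reduces each xView pixel-space box to YOLO's normalized x-center/y-center/width/height via utils.general.xyxy2xywhn. A self-contained sketch of that normalization follows (a simplified stand-in for the real helper, which also takes clip and eps options):

```python
# Simplified stand-in for utils.general.xyxy2xywhn as used above:
# pixel [x1, y1, x2, y2] boxes -> normalized [xc, yc, w, h] YOLO labels.
import numpy as np


def xyxy2xywhn(box, w, h):
    """Convert an (N, 4) array of pixel xyxy boxes; clips to image bounds."""
    box = box.astype(float)
    box[:, [0, 2]] = box[:, [0, 2]].clip(0, w)  # clip x coords to width
    box[:, [1, 3]] = box[:, [1, 3]].clip(0, h)  # clip y coords to height
    out = np.empty_like(box)
    out[:, 0] = (box[:, 0] + box[:, 2]) / 2 / w  # normalized x center
    out[:, 1] = (box[:, 1] + box[:, 3]) / 2 / h  # normalized y center
    out[:, 2] = (box[:, 2] - box[:, 0]) / w  # normalized width
    out[:, 3] = (box[:, 3] - box[:, 1]) / h  # normalized height
    return out


print(xyxy2xywhn(np.array([[100, 200, 300, 400]]), w=1000, h=800))
# -> [[0.2 0.375 0.2 0.25]]
```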
diff --git a/yolov5/segment/predict.py b/yolov5/segment/predict.py
deleted file mode 100644
index e0e4336..0000000
--- a/yolov5/segment/predict.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
-
-Usage - sources:
- $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- screen # screenshot
- path/ # directory
- list.txt # list of images
- list.streams # list of streams
- 'path/*.jpg' # glob
- 'https://youtu.be/LNwODJXcvt4' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
-
-Usage - formats:
- $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch
- yolov5s-seg.torchscript # TorchScript
- yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s-seg_openvino_model # OpenVINO
- yolov5s-seg.engine # TensorRT
- yolov5s-seg.mlmodel # CoreML (macOS-only)
- yolov5s-seg_saved_model # TensorFlow SavedModel
- yolov5s-seg.pb # TensorFlow GraphDef
- yolov5s-seg.tflite # TensorFlow Lite
- yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s-seg_paddle_model # PaddlePaddle
-"""
-
-import argparse
-import os
-import platform
-import sys
-from pathlib import Path
-
-import torch
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from ultralytics.utils.plotting import Annotator, colors, save_one_box
-
-from models.common import DetectMultiBackend
-from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
-from utils.general import (
- LOGGER,
- Profile,
- check_file,
- check_img_size,
- check_imshow,
- check_requirements,
- colorstr,
- cv2,
- increment_path,
- non_max_suppression,
- print_args,
- scale_boxes,
- scale_segments,
- strip_optimizer,
-)
-from utils.segment.general import masks2segments, process_mask, process_mask_native
-from utils.torch_utils import select_device, smart_inference_mode
-
-
-@smart_inference_mode()
-def run(
- weights=ROOT / "yolov5s-seg.pt", # model.pt path(s)
- source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
- data=ROOT / "data/coco128.yaml", # dataset.yaml path
- imgsz=(640, 640), # inference size (height, width)
- conf_thres=0.25, # confidence threshold
- iou_thres=0.45, # NMS IOU threshold
- max_det=1000, # maximum detections per image
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
- view_img=False, # show results
- save_txt=False, # save results to *.txt
- save_conf=False, # save confidences in --save-txt labels
- save_crop=False, # save cropped prediction boxes
- nosave=False, # do not save images/videos
- classes=None, # filter by class: --class 0, or --class 0 2 3
- agnostic_nms=False, # class-agnostic NMS
- augment=False, # augmented inference
- visualize=False, # visualize features
- update=False, # update all models
- project=ROOT / "runs/predict-seg", # save results to project/name
- name="exp", # save results to project/name
- exist_ok=False, # existing project/name ok, do not increment
- line_thickness=3, # bounding box thickness (pixels)
- hide_labels=False, # hide labels
- hide_conf=False, # hide confidences
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- vid_stride=1, # video frame-rate stride
- retina_masks=False,
-):
- """Run YOLOv5 segmentation inference on diverse sources including images, videos, directories, and streams."""
- source = str(source)
- save_img = not nosave and not source.endswith(".txt") # save inference images
- is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
- webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
- screenshot = source.lower().startswith("screen")
- if is_url and is_file:
- source = check_file(source) # download
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
- device = select_device(device)
- model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
- stride, names, pt = model.stride, model.names, model.pt
- imgsz = check_img_size(imgsz, s=stride) # check image size
-
- # Dataloader
- bs = 1 # batch_size
- if webcam:
- view_img = check_imshow(warn=True)
- dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
- bs = len(dataset)
- elif screenshot:
- dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
- else:
- dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
- vid_path, vid_writer = [None] * bs, [None] * bs
-
- # Run inference
- model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
- seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
- for path, im, im0s, vid_cap, s in dataset:
- with dt[0]:
- im = torch.from_numpy(im).to(model.device)
- im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
- im /= 255 # 0 - 255 to 0.0 - 1.0
- if len(im.shape) == 3:
- im = im[None] # expand for batch dim
-
- # Inference
- with dt[1]:
- visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
- pred, proto = model(im, augment=augment, visualize=visualize)[:2]
-
- # NMS
- with dt[2]:
- pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
-
- # Second-stage classifier (optional)
- # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
-
- # Process predictions
- for i, det in enumerate(pred): # per image
- seen += 1
- if webcam: # batch_size >= 1
- p, im0, frame = path[i], im0s[i].copy(), dataset.count
- s += f"{i}: "
- else:
- p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
-
- p = Path(p) # to Path
- save_path = str(save_dir / p.name) # im.jpg
- txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
- s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
- imc = im0.copy() if save_crop else im0 # for save_crop
- annotator = Annotator(im0, line_width=line_thickness, example=str(names))
- if len(det):
- if retina_masks:
- # scale bbox first, then crop masks
- det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
- masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC
- else:
- masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC
- det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
-
- # Segments
- if save_txt:
- segments = [
- scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
- for x in reversed(masks2segments(masks))
- ]
-
- # Print results
- for c in det[:, 5].unique():
- n = (det[:, 5] == c).sum() # detections per class
- s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
-
- # Mask plotting
- annotator.masks(
- masks,
- colors=[colors(x, True) for x in det[:, 5]],
- im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous()
- / 255
- if retina_masks
- else im[i],
- )
-
- # Write results
- for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
- if save_txt: # Write to file
- seg = segments[j].reshape(-1) # (n,2) to (n*2)
- line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format
- with open(f"{txt_path}.txt", "a") as f:
- f.write(("%g " * len(line)).rstrip() % line + "\n")
-
- if save_img or save_crop or view_img: # Add bbox to image
- c = int(cls) # integer class
- label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
- annotator.box_label(xyxy, label, color=colors(c, True))
- # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
- if save_crop:
- save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
-
- # Stream results
- im0 = annotator.result()
- if view_img:
- if platform.system() == "Linux" and p not in windows:
- windows.append(p)
- cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
- cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
- cv2.imshow(str(p), im0)
- if cv2.waitKey(1) == ord("q"): # 1 millisecond
- exit()
-
- # Save results (image with detections)
- if save_img:
- if dataset.mode == "image":
- cv2.imwrite(save_path, im0)
- else: # 'video' or 'stream'
- if vid_path[i] != save_path: # new video
- vid_path[i] = save_path
- if isinstance(vid_writer[i], cv2.VideoWriter):
- vid_writer[i].release() # release previous video writer
- if vid_cap: # video
- fps = vid_cap.get(cv2.CAP_PROP_FPS)
- w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- else: # stream
- fps, w, h = 30, im0.shape[1], im0.shape[0]
- save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos
- vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
- vid_writer[i].write(im0)
-
- # Print time (inference-only)
- LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")
-
- # Print results
- t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
- LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
- if save_txt or save_img:
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
- if update:
- strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
-
-
-def parse_opt():
- """Parses command-line options for YOLOv5 inference including model paths, data sources, inference settings, and
- output preferences.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)")
- parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
- parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
- parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
- parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
- parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
- parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--view-img", action="store_true", help="show results")
- parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
- parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
- parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
- parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
- parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
- parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
- parser.add_argument("--augment", action="store_true", help="augmented inference")
- parser.add_argument("--visualize", action="store_true", help="visualize features")
- parser.add_argument("--update", action="store_true", help="update all models")
- parser.add_argument("--project", default=ROOT / "runs/predict-seg", help="save results to project/name")
- parser.add_argument("--name", default="exp", help="save results to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
- parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
- parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
- parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
- parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
- parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
- parser.add_argument("--retina-masks", action="store_true", help="whether to plot masks in native resolution")
- opt = parser.parse_args()
- opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
- print_args(vars(opt))
- return opt
-
-
-def main(opt):
- """Executes YOLOv5 model inference with given options, checking for requirements before launching."""
- check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
- run(**vars(opt))
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
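Before this removal, segment/predict.py could also be driven programmatically through run(), which accepts the same options as the CLI flags above. A sketch, assuming the yolov5 root is on sys.path; paths are illustrative:

```python
# Programmatic equivalent of:
#   python segment/predict.py --weights yolov5s-seg.pt --source data/images --save-txt
# Assumes the yolov5 directory is the working dir / on sys.path.
from segment.predict import run

run(
    weights="yolov5s-seg.pt",  # segmentation checkpoint
    source="data/images",  # file/dir/URL/glob/screen/0(webcam)
    imgsz=(640, 640),  # inference size (height, width)
    conf_thres=0.25,  # confidence threshold
    save_txt=True,  # also write per-image label files
)
```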
diff --git a/yolov5/segment/train.py b/yolov5/segment/train.py
deleted file mode 100644
index 815c97c..0000000
--- a/yolov5/segment/train.py
+++ /dev/null
@@ -1,764 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Train a YOLOv5 segmentation model on a segmentation dataset. Models and datasets download automatically from the
-latest YOLOv5 release.
-
-Usage - Single-GPU training:
- $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended)
- $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch
-
-Usage - Multi-GPU DDP training:
- $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
-
-Models: https://github.com/ultralytics/yolov5/tree/master/models
-Datasets: https://github.com/ultralytics/yolov5/tree/master/data
-Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
-"""
-
-import argparse
-import math
-import os
-import random
-import subprocess
-import sys
-import time
-from copy import deepcopy
-from datetime import datetime
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.distributed as dist
-import torch.nn as nn
-import yaml
-from torch.optim import lr_scheduler
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-import segment.val as validate # for end-of-epoch mAP
-from models.experimental import attempt_load
-from models.yolo import SegmentationModel
-from utils.autoanchor import check_anchors
-from utils.autobatch import check_train_batch_size
-from utils.callbacks import Callbacks
-from utils.downloads import attempt_download, is_url
-from utils.general import (
- LOGGER,
- TQDM_BAR_FORMAT,
- check_amp,
- check_dataset,
- check_file,
- check_git_info,
- check_git_status,
- check_img_size,
- check_requirements,
- check_suffix,
- check_yaml,
- colorstr,
- get_latest_run,
- increment_path,
- init_seeds,
- intersect_dicts,
- labels_to_class_weights,
- labels_to_image_weights,
- one_cycle,
- print_args,
- print_mutation,
- strip_optimizer,
- yaml_save,
-)
-from utils.loggers import GenericLogger
-from utils.plots import plot_evolve, plot_labels
-from utils.segment.dataloaders import create_dataloader
-from utils.segment.loss import ComputeLoss
-from utils.segment.metrics import KEYS, fitness
-from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
-from utils.torch_utils import (
- EarlyStopping,
- ModelEMA,
- de_parallel,
- select_device,
- smart_DDP,
- smart_optimizer,
- smart_resume,
- torch_distributed_zero_first,
-)
-
-LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv("RANK", -1))
-WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
-GIT_INFO = check_git_info()
-
-
-def train(hyp, opt, device, callbacks):
- """
- Trains the YOLOv5 model on a dataset, managing hyperparameters, model optimization, logging, and validation.
-
- `hyp` is path/to/hyp.yaml or hyp dictionary.
- """
- (
- save_dir,
- epochs,
- batch_size,
- weights,
- single_cls,
- evolve,
- data,
- cfg,
- resume,
- noval,
- nosave,
- workers,
- freeze,
- mask_ratio,
- ) = (
- Path(opt.save_dir),
- opt.epochs,
- opt.batch_size,
- opt.weights,
- opt.single_cls,
- opt.evolve,
- opt.data,
- opt.cfg,
- opt.resume,
- opt.noval,
- opt.nosave,
- opt.workers,
- opt.freeze,
- opt.mask_ratio,
- )
- # callbacks.run('on_pretrain_routine_start')
-
- # Directories
- w = save_dir / "weights" # weights dir
- (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
- last, best = w / "last.pt", w / "best.pt"
-
- # Hyperparameters
- if isinstance(hyp, str):
- with open(hyp, errors="ignore") as f:
- hyp = yaml.safe_load(f) # load hyps dict
- LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
- opt.hyp = hyp.copy() # for saving hyps to checkpoints
-
- # Save run settings
- if not evolve:
- yaml_save(save_dir / "hyp.yaml", hyp)
- yaml_save(save_dir / "opt.yaml", vars(opt))
-
- # Loggers
- data_dict = None
- if RANK in {-1, 0}:
- logger = GenericLogger(opt=opt, console_logger=LOGGER)
-
- # Config
- plots = not evolve and not opt.noplots # create plots
- overlap = not opt.no_overlap
- cuda = device.type != "cpu"
- init_seeds(opt.seed + 1 + RANK, deterministic=True)
- with torch_distributed_zero_first(LOCAL_RANK):
- data_dict = data_dict or check_dataset(data) # check if None
- train_path, val_path = data_dict["train"], data_dict["val"]
- nc = 1 if single_cls else int(data_dict["nc"]) # number of classes
- names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"] # class names
- is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt") # COCO dataset
-
- # Model
- check_suffix(weights, ".pt") # check weights
- pretrained = weights.endswith(".pt")
- if pretrained:
- with torch_distributed_zero_first(LOCAL_RANK):
- weights = attempt_download(weights) # download if not found locally
- ckpt = torch.load(weights, map_location="cpu") # load checkpoint to CPU to avoid CUDA memory leak
- model = SegmentationModel(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)
- exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] # exclude keys
- csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32
- csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
- model.load_state_dict(csd, strict=False) # load
- LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}") # report
- else:
- model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
- amp = check_amp(model) # check AMP
-
- # Freeze
- freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
- for k, v in model.named_parameters():
- v.requires_grad = True # train all layers
- # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
- if any(x in k for x in freeze):
- LOGGER.info(f"freezing {k}")
- v.requires_grad = False
-
- # Image size
- gs = max(int(model.stride.max()), 32) # grid size (max stride)
- imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
-
- # Batch size
- if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
- batch_size = check_train_batch_size(model, imgsz, amp)
- logger.update_params({"batch_size": batch_size})
- # loggers.on_params_update({"batch_size": batch_size})
-
- # Optimizer
- nbs = 64 # nominal batch size
- accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
- hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay
- optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"])
-
- # Scheduler
- if opt.cos_lr:
- lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
- else:
-
- def lf(x):
- """Linear learning rate scheduler decreasing from 1 to hyp['lrf'] over 'epochs'."""
- return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
-
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
-
- # EMA
- ema = ModelEMA(model) if RANK in {-1, 0} else None
-
- # Resume
- best_fitness, start_epoch = 0.0, 0
- if pretrained:
- if resume:
- best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
- del ckpt, csd
-
- # DP mode
- if cuda and RANK == -1 and torch.cuda.device_count() > 1:
- LOGGER.warning(
- "WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n"
- "See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started."
- )
- model = torch.nn.DataParallel(model)
-
- # SyncBatchNorm
- if opt.sync_bn and cuda and RANK != -1:
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
- LOGGER.info("Using SyncBatchNorm()")
-
- # Trainloader
- train_loader, dataset = create_dataloader(
- train_path,
- imgsz,
- batch_size // WORLD_SIZE,
- gs,
- single_cls,
- hyp=hyp,
- augment=True,
- cache=None if opt.cache == "val" else opt.cache,
- rect=opt.rect,
- rank=LOCAL_RANK,
- workers=workers,
- image_weights=opt.image_weights,
- quad=opt.quad,
- prefix=colorstr("train: "),
- shuffle=True,
- mask_downsample_ratio=mask_ratio,
- overlap_mask=overlap,
- )
- labels = np.concatenate(dataset.labels, 0)
- mlc = int(labels[:, 0].max()) # max label class
- assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}"
-
- # Process 0
- if RANK in {-1, 0}:
- val_loader = create_dataloader(
- val_path,
- imgsz,
- batch_size // WORLD_SIZE * 2,
- gs,
- single_cls,
- hyp=hyp,
- cache=None if noval else opt.cache,
- rect=True,
- rank=-1,
- workers=workers * 2,
- pad=0.5,
- mask_downsample_ratio=mask_ratio,
- overlap_mask=overlap,
- prefix=colorstr("val: "),
- )[0]
-
- if not resume:
- if not opt.noautoanchor:
- check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # run AutoAnchor
- model.half().float() # pre-reduce anchor precision
-
- if plots:
- plot_labels(labels, names, save_dir)
- # callbacks.run('on_pretrain_routine_end', labels, names)
-
- # DDP mode
- if cuda and RANK != -1:
- model = smart_DDP(model)
-
- # Model attributes
- nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
- hyp["box"] *= 3 / nl # scale to layers
- hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers
- hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
- hyp["label_smoothing"] = opt.label_smoothing
- model.nc = nc # attach number of classes to model
- model.hyp = hyp # attach hyperparameters to model
- model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
- model.names = names
-
- # Start training
- t0 = time.time()
- nb = len(train_loader) # number of batches
- nw = max(round(hyp["warmup_epochs"] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
- # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
- last_opt_step = -1
- maps = np.zeros(nc) # mAP per class
- results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95 for boxes and masks, val_loss(box, seg, obj, cls)
- scheduler.last_epoch = start_epoch - 1 # do not move
- scaler = torch.cuda.amp.GradScaler(enabled=amp)
- stopper, stop = EarlyStopping(patience=opt.patience), False
- compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
- # callbacks.run('on_train_start')
- LOGGER.info(
- f"Image sizes {imgsz} train, {imgsz} val\n"
- f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n"
- f"Logging results to {colorstr('bold', save_dir)}\n"
- f"Starting training for {epochs} epochs..."
- )
- for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
- # callbacks.run('on_train_epoch_start')
- model.train()
-
- # Update image weights (optional, single-GPU only)
- if opt.image_weights:
- cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
- iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
- dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
-
- # Update mosaic border (optional)
- # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
- # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
-
- mloss = torch.zeros(4, device=device) # mean losses
- if RANK != -1:
- train_loader.sampler.set_epoch(epoch)
- pbar = enumerate(train_loader)
- LOGGER.info(
- ("\n" + "%11s" * 8)
- % ("Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size")
- )
- if RANK in {-1, 0}:
- pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
- optimizer.zero_grad()
- for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------
- # callbacks.run('on_train_batch_start')
- ni = i + nb * epoch # number integrated batches (since train start)
- imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
-
- # Warmup
- if ni <= nw:
- xi = [0, nw] # x interp
- # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
- accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
- for j, x in enumerate(optimizer.param_groups):
- # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
- x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)])
- if "momentum" in x:
- x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])
-
- # Multi-scale
- if opt.multi_scale:
- sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size
- sf = sz / max(imgs.shape[2:]) # scale factor
- if sf != 1:
- ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
- imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
-
- # Forward
- with torch.cuda.amp.autocast(amp):
- pred = model(imgs) # forward
- loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
- if RANK != -1:
- loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
- if opt.quad:
- loss *= 4.0
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
- if ni - last_opt_step >= accumulate:
- scaler.unscale_(optimizer) # unscale gradients
- torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
- scaler.step(optimizer) # optimizer.step
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
- last_opt_step = ni
-
- # Log
- if RANK in {-1, 0}:
- mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
- mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
- pbar.set_description(
- ("%11s" * 2 + "%11.4g" * 6)
- % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
- )
- # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
- # if callbacks.stop_training:
- # return
-
- # Mosaic plots
- if plots:
- if ni < 3:
- plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg")
- if ni == 10:
- files = sorted(save_dir.glob("train*.jpg"))
- logger.log_images(files, "Mosaics", epoch)
- # end batch ------------------------------------------------------------------------------------------------
-
- # Scheduler
- lr = [x["lr"] for x in optimizer.param_groups] # for loggers
- scheduler.step()
-
- if RANK in {-1, 0}:
- # mAP
- # callbacks.run('on_train_epoch_end', epoch=epoch)
- ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"])
- final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
- if not noval or final_epoch: # Calculate mAP
- results, maps, _ = validate.run(
- data_dict,
- batch_size=batch_size // WORLD_SIZE * 2,
- imgsz=imgsz,
- half=amp,
- model=ema.ema,
- single_cls=single_cls,
- dataloader=val_loader,
- save_dir=save_dir,
- plots=False,
- callbacks=callbacks,
- compute_loss=compute_loss,
- mask_downsample_ratio=mask_ratio,
- overlap=overlap,
- )
-
- # Update best mAP
- fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
- stop = stopper(epoch=epoch, fitness=fi) # early stop check
- if fi > best_fitness:
- best_fitness = fi
- log_vals = list(mloss) + list(results) + lr
- # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
- # Log val metrics and media
- metrics_dict = dict(zip(KEYS, log_vals))
- logger.log_metrics(metrics_dict, epoch)
-
- # Save model
- if (not nosave) or (final_epoch and not evolve): # if save
- ckpt = {
- "epoch": epoch,
- "best_fitness": best_fitness,
- "model": deepcopy(de_parallel(model)).half(),
- "ema": deepcopy(ema.ema).half(),
- "updates": ema.updates,
- "optimizer": optimizer.state_dict(),
- "opt": vars(opt),
- "git": GIT_INFO, # {remote, branch, commit} if a git repo
- "date": datetime.now().isoformat(),
- }
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fi:
- torch.save(ckpt, best)
- if opt.save_period > 0 and epoch % opt.save_period == 0:
- torch.save(ckpt, w / f"epoch{epoch}.pt")
- logger.log_model(w / f"epoch{epoch}.pt")
- del ckpt
- # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
-
- # EarlyStopping
- if RANK != -1: # if DDP training
- broadcast_list = [stop if RANK == 0 else None]
- dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
- if RANK != 0:
- stop = broadcast_list[0]
- if stop:
- break # must break all DDP ranks
-
- # end epoch ----------------------------------------------------------------------------------------------------
- # end training -----------------------------------------------------------------------------------------------------
- if RANK in {-1, 0}:
- LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.")
- for f in last, best:
- if f.exists():
- strip_optimizer(f) # strip optimizers
- if f is best:
- LOGGER.info(f"\nValidating {f}...")
- results, _, _ = validate.run(
- data_dict,
- batch_size=batch_size // WORLD_SIZE * 2,
- imgsz=imgsz,
- model=attempt_load(f, device).half(),
- iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
- single_cls=single_cls,
- dataloader=val_loader,
- save_dir=save_dir,
- save_json=is_coco,
- verbose=True,
- plots=plots,
- callbacks=callbacks,
- compute_loss=compute_loss,
- mask_downsample_ratio=mask_ratio,
- overlap=overlap,
- ) # val best model with plots
- if is_coco:
- # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
- metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
- logger.log_metrics(metrics_dict, epoch)
-
- # callbacks.run('on_train_end', last, best, epoch, results)
- # on train end callback using genericLogger
- logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
- if not opt.evolve:
- logger.log_model(best, epoch)
- if plots:
- plot_results_with_masks(file=save_dir / "results.csv") # save results.png
- files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))]
- files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
- logger.log_images(files, "Results", epoch + 1)
- logger.log_images(sorted(save_dir.glob("val*.jpg")), "Validation", epoch + 1)
- torch.cuda.empty_cache()
- return results
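The warmup block in train() ramps the optimizer with np.interp over the first nw integrated batches: the bias group's learning rate falls from warmup_bias_lr toward lr0 while the other parameter groups rise from zero (at epoch 0 the linear lf() target is approximately lr0). A tiny isolated sketch of that interpolation, with the rates set to the hyp.scratch-low defaults and nw chosen for illustration:

```python
# Isolated sketch of the warmup interpolation in train() above: over the
# first nw batches, bias lr falls 0.1 -> lr0 while other group lrs rise
# 0.0 -> lr0. Rates are hyp.scratch-low defaults; nw is illustrative.
import numpy as np

nw, lr0, warmup_bias_lr = 300, 0.01, 0.1

for ni in (0, 150, 300):  # integrated batch index since train start
    bias_lr = np.interp(ni, [0, nw], [warmup_bias_lr, lr0])
    other_lr = np.interp(ni, [0, nw], [0.0, lr0])
    print(f"ni={ni}: bias_lr={bias_lr:.4f}, other_lr={other_lr:.4f}")
```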
-
-
-def parse_opt(known=False):
- """
- Parses command line arguments for training configurations, returning parsed arguments.
-
- Supports both known and unknown args.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument("--weights", type=str, default=ROOT / "yolov5s-seg.pt", help="initial weights path")
- parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
- parser.add_argument("--data", type=str, default=ROOT / "data/coco128-seg.yaml", help="dataset.yaml path")
- parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
- parser.add_argument("--epochs", type=int, default=100, help="total training epochs")
- parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")
- parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
- parser.add_argument("--rect", action="store_true", help="rectangular training")
- parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
- parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
- parser.add_argument("--noval", action="store_true", help="only validate final epoch")
- parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
- parser.add_argument("--noplots", action="store_true", help="save no plot files")
- parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
- parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
- parser.add_argument("--cache", type=str, nargs="?", const="ram", help="image --cache ram/disk")
- parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
- parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
- parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
- parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
- parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
- parser.add_argument("--project", default=ROOT / "runs/train-seg", help="save to project/name")
- parser.add_argument("--name", default="exp", help="save to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--quad", action="store_true", help="quad dataloader")
- parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
- parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
- parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
- parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
- parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
- parser.add_argument("--seed", type=int, default=0, help="Global training seed")
- parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
-
- # Instance Segmentation Args
- parser.add_argument("--mask-ratio", type=int, default=4, help="Downsample the truth masks to saving memory")
- parser.add_argument("--no-overlap", action="store_true", help="Overlap masks train faster at slightly less mAP")
-
- return parser.parse_known_args()[0] if known else parser.parse_args()
-
-
-def main(opt, callbacks=Callbacks()):
- """Initializes training or evolution of YOLOv5 models based on provided configuration and options."""
- if RANK in {-1, 0}:
- print_args(vars(opt))
- check_git_status()
- check_requirements(ROOT / "requirements.txt")
-
- # Resume
- if opt.resume and not opt.evolve: # resume from specified or most recent last.pt
- last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
- opt_yaml = last.parent.parent / "opt.yaml" # train options yaml
- opt_data = opt.data # original dataset
- if opt_yaml.is_file():
- with open(opt_yaml, errors="ignore") as f:
- d = yaml.safe_load(f)
- else:
- d = torch.load(last, map_location="cpu")["opt"]
- opt = argparse.Namespace(**d) # replace
- opt.cfg, opt.weights, opt.resume = "", str(last), True # reinstate
- if is_url(opt_data):
- opt.data = check_file(opt_data) # avoid HUB resume auth timeout
- else:
- opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (
- check_file(opt.data),
- check_yaml(opt.cfg),
- check_yaml(opt.hyp),
- str(opt.weights),
- str(opt.project),
- ) # checks
- assert len(opt.cfg) or len(opt.weights), "either --cfg or --weights must be specified"
- if opt.evolve:
- if opt.project == str(ROOT / "runs/train-seg"): # if default project name, rename to runs/evolve-seg
- opt.project = str(ROOT / "runs/evolve-seg")
- opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
- if opt.name == "cfg":
- opt.name = Path(opt.cfg).stem # use model.yaml as name
- opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
-
- # DDP mode
- device = select_device(opt.device, batch_size=opt.batch_size)
- if LOCAL_RANK != -1:
- msg = "is not compatible with YOLOv5 Multi-GPU DDP training"
- assert not opt.image_weights, f"--image-weights {msg}"
- assert not opt.evolve, f"--evolve {msg}"
- assert opt.batch_size != -1, f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size"
- assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
- assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
- torch.cuda.set_device(LOCAL_RANK)
- device = torch.device("cuda", LOCAL_RANK)
- dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
-
- # Train
- if not opt.evolve:
- train(opt.hyp, opt, device, callbacks)
-
- # Evolve hyperparameters (optional)
- else:
- # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
- meta = {
- "lr0": (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
- "lrf": (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
- "momentum": (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
- "weight_decay": (1, 0.0, 0.001), # optimizer weight decay
- "warmup_epochs": (1, 0.0, 5.0), # warmup epochs (fractions ok)
- "warmup_momentum": (1, 0.0, 0.95), # warmup initial momentum
- "warmup_bias_lr": (1, 0.0, 0.2), # warmup initial bias lr
- "box": (1, 0.02, 0.2), # box loss gain
- "cls": (1, 0.2, 4.0), # cls loss gain
- "cls_pw": (1, 0.5, 2.0), # cls BCELoss positive_weight
- "obj": (1, 0.2, 4.0), # obj loss gain (scale with pixels)
- "obj_pw": (1, 0.5, 2.0), # obj BCELoss positive_weight
- "iou_t": (0, 0.1, 0.7), # IoU training threshold
- "anchor_t": (1, 2.0, 8.0), # anchor-multiple threshold
- "anchors": (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
- "fl_gamma": (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
- "hsv_h": (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
- "hsv_s": (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
- "hsv_v": (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
- "degrees": (1, 0.0, 45.0), # image rotation (+/- deg)
- "translate": (1, 0.0, 0.9), # image translation (+/- fraction)
- "scale": (1, 0.0, 0.9), # image scale (+/- gain)
- "shear": (1, 0.0, 10.0), # image shear (+/- deg)
- "perspective": (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
- "flipud": (1, 0.0, 1.0), # image flip up-down (probability)
- "fliplr": (0, 0.0, 1.0), # image flip left-right (probability)
- "mosaic": (1, 0.0, 1.0), # image mixup (probability)
- "mixup": (1, 0.0, 1.0), # image mixup (probability)
- "copy_paste": (1, 0.0, 1.0),
- } # segment copy-paste (probability)
-
- with open(opt.hyp, errors="ignore") as f:
- hyp = yaml.safe_load(f) # load hyps dict
- if "anchors" not in hyp: # anchors commented in hyp.yaml
- hyp["anchors"] = 3
- if opt.noautoanchor:
- del hyp["anchors"], meta["anchors"]
- opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
- # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
- evolve_yaml, evolve_csv = save_dir / "hyp_evolve.yaml", save_dir / "evolve.csv"
- if opt.bucket:
- # download evolve.csv if exists
- subprocess.run(
- [
- "gsutil",
- "cp",
- f"gs://{opt.bucket}/evolve.csv",
- str(evolve_csv),
- ]
- )
-
- for _ in range(opt.evolve): # generations to evolve
- if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
- # Select parent(s)
- parent = "single" # parent selection method: 'single' or 'weighted'
- x = np.loadtxt(evolve_csv, ndmin=2, delimiter=",", skiprows=1)
- n = min(5, len(x)) # number of previous results to consider
- x = x[np.argsort(-fitness(x))][:n] # top n mutations
- w = fitness(x) - fitness(x).min() + 1e-6 # weights (sum > 0)
- if parent == "single" or len(x) == 1:
- # x = x[random.randint(0, n - 1)] # random selection
- x = x[random.choices(range(n), weights=w)[0]] # weighted selection
- elif parent == "weighted":
- x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
-
- # Mutate
- mp, s = 0.8, 0.2 # mutation probability, sigma
- npr = np.random
- npr.seed(int(time.time()))
- g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
- ng = len(meta)
- v = np.ones(ng)
- while all(v == 1): # mutate until a change occurs (prevent duplicates)
- v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
- for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
- hyp[k] = float(x[i + 12] * v[i]) # mutate
-
- # Constrain to limits
- for k, v in meta.items():
- hyp[k] = max(hyp[k], v[1]) # lower limit
- hyp[k] = min(hyp[k], v[2]) # upper limit
- hyp[k] = round(hyp[k], 5) # significant digits
-
- # Train mutation
- results = train(hyp.copy(), opt, device, callbacks)
- callbacks = Callbacks()
- # Write mutation results
- print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)
-
- # Plot results
- plot_evolve(evolve_csv)
- LOGGER.info(
- f"Hyperparameter evolution finished {opt.evolve} generations\n"
- f"Results saved to {colorstr('bold', save_dir)}\n"
- f"Usage example: $ python train.py --hyp {evolve_yaml}"
- )
-
-
-def run(**kwargs):
- """
- Executes YOLOv5 training with given parameters, altering options programmatically; returns updated options.
-
- Example: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
- """
- opt = parse_opt(True)
- for k, v in kwargs.items():
- setattr(opt, k, v)
- main(opt)
- return opt
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
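A worked example of the effective-batch bookkeeping near the top of train(): with the nominal batch size nbs = 64, smaller per-step batches are compensated by gradient accumulation, and weight decay is rescaled so regularization tracks the effective batch. Using the default --batch-size 16 and weight_decay 0.0005:

```python
# Effective-batch arithmetic from train(): accumulate gradients until the
# effective batch reaches nbs, and rescale weight decay accordingly.
nbs = 64  # nominal batch size
batch_size = 16  # actual per-step batch size (--batch-size default)
weight_decay = 0.0005  # hyp.scratch-low default

accumulate = max(round(nbs / batch_size), 1)  # 4 optimizer steps worth of grads
scaled_wd = weight_decay * batch_size * accumulate / nbs  # 16 * 4 / 64 = 1.0x here

print(accumulate, scaled_wd)  # -> 4 0.0005
```

With batch_size 16 the scale factor is exactly 1.0; it departs from 1.0 only when accumulate rounds, e.g. batch_size 24 gives accumulate 3 and a 1.125x weight decay.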
diff --git a/yolov5/segment/tutorial.ipynb b/yolov5/segment/tutorial.ipynb
deleted file mode 100644
index bb5c1f9..0000000
--- a/yolov5/segment/tutorial.ipynb
+++ /dev/null
@@ -1,602 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "t6MPjfT5NrKQ"
- },
- "source": [
- "\n",
- "\n",
- "
\n",
- " 
\n",
- "\n",
- "\n",
- "
\n",
- "

\n",
- "

\n",
- "

\n",
- "
\n",
- "\n",
- "This
YOLOv5 🚀 notebook by
Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See
GitHub for community support or
contact us for professional support.\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "7mGmQbAO5pQb"
- },
- "source": [
- "# Setup\n",
- "\n",
- "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "wbvMlHd_QwMG",
- "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
- ]
- }
- ],
- "source": [
- "!git clone https://github.com/ultralytics/yolov5 # clone\n",
- "%cd yolov5\n",
- "%pip install -qr requirements.txt comet_ml # install\n",
- "\n",
- "import torch\n",
- "\n",
- "import utils\n",
- "\n",
- "display = utils.notebook_init() # checks"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4JnkELT0cIJg"
- },
- "source": [
- "# 1. Predict\n",
- "\n",
- "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n",
- "\n",
- "```shell\n",
- "python segment/predict.py --source 0 # webcam\n",
- " img.jpg # image \n",
- " vid.mp4 # video\n",
- " screen # screenshot\n",
- " path/ # directory\n",
- " 'path/*.jpg' # glob\n",
- " 'https://youtu.be/LNwODJXcvt4' # YouTube\n",
- " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "zR9ZbuQCH7FX",
- "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
- "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
- "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
- "\n",
- "Fusing layers... \n",
- "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
- "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
- "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
- ]
- }
- ],
- "source": [
- "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n",
- "# display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0eq1SMWl6Sfn"
- },
- "source": [
- "# 2. Validate\n",
- "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "WQPtK1QYVaD_",
- "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels-segments.zip ...\n",
- "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n",
- "######################################################################## 100.0%\n",
- "######################################################################## 100.0%\n"
- ]
- }
- ],
- "source": [
- "# Download COCO val\n",
- "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "X58w8JLpMnjH",
- "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
- "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "Fusing layers... \n",
- "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
- "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
- " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n",
- " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n",
- "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
- "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
- ]
- }
- ],
- "source": [
- "# Validate YOLOv5s-seg on COCO val\n",
- "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ZY2VXXXu74w5"
- },
- "source": [
- "# 3. Train\n",
- "\n",
- "
\n",
- "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
- "
\n",
- "\n",
- "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
- "\n",
- "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
- "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
- "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
- "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n",
- "
\n",
- "\n",
- "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
- "\n",
- "## Train on Custom Data with Roboflow 🌟 NEW\n",
- "\n",
- "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
- "\n",
- "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n",
-    "- Custom Training Notebook: [Open In Colab](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n",
- "\n",
-    "Label images lightning fast (including with model-assisted labeling)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "i3oKtE4g-aNn"
- },
- "outputs": [],
- "source": [
- "# @title Select YOLOv5 🚀 logger {run: 'auto'}\n",
- "logger = \"Comet\" # @param ['Comet', 'ClearML', 'TensorBoard']\n",
- "\n",
- "if logger == \"Comet\":\n",
- " %pip install -q comet_ml\n",
- " import comet_ml\n",
- "\n",
- " comet_ml.init()\n",
- "elif logger == \"ClearML\":\n",
- " %pip install -q clearml\n",
- " import clearml\n",
- "\n",
- " clearml.browser_login()\n",
- "elif logger == \"TensorBoard\":\n",
- " %load_ext tensorboard\n",
- " %tensorboard --logdir runs/train"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "1NcFxRcFdJ_O",
- "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
- "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
- "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
- "\n",
- "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
- "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
- "\n",
- "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
- "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip to coco128-seg.zip...\n",
- "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
- "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
- "\n",
- " from n params module arguments \n",
- " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
- " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
- " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
- " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
- " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n",
- " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
- " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n",
- " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
- " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n",
- " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n",
- " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
- " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
- " 12 [-1, 6] 1 0 models.common.Concat [1] \n",
- " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
- " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
- " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
- " 16 [-1, 4] 1 0 models.common.Concat [1] \n",
- " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
- " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
- " 19 [-1, 14] 1 0 models.common.Concat [1] \n",
- " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
- " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
- " 22 [-1, 10] 1 0 models.common.Concat [1] \n",
- " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
- " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n",
- "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n",
- "\n",
- "Transferred 367/367 items from yolov5s-seg.pt\n",
- "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
- "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
- "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00, ?it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\n",
- "\n",
- "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
- "Plotting labels to runs/train-seg/exp/labels.jpg... \n",
- "Image sizes 640 train, 640 val\n",
- "Using 2 dataloader workers\n",
- "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n",
- "Starting training for 3 epochs...\n",
- "\n",
- " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
- " 0/2 4.92G 0.0417 0.04646 0.06066 0.02126 192 640: 100% 8/8 [00:08<00:00, 1.10s/it]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.81it/s]\n",
- " all 128 929 0.737 0.649 0.715 0.492 0.719 0.617 0.658 0.408\n",
- "\n",
- " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
- " 1/2 6.29G 0.04157 0.04503 0.05772 0.01777 208 640: 100% 8/8 [00:09<00:00, 1.21s/it]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.87it/s]\n",
- " all 128 929 0.756 0.674 0.738 0.506 0.725 0.64 0.68 0.422\n",
- "\n",
- " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
- " 2/2 6.29G 0.0425 0.04793 0.06784 0.01863 161 640: 100% 8/8 [00:03<00:00, 2.02it/s]\n",
- " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.88it/s]\n",
- " all 128 929 0.736 0.694 0.747 0.522 0.769 0.622 0.683 0.427\n",
- "\n",
- "3 epochs completed in 0.009 hours.\n",
- "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n",
- "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n",
- "\n",
- "Validating runs/train-seg/exp/weights/best.pt...\n",
- "Fusing layers... \n",
- "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
- " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:06<00:00, 1.59s/it]\n",
- " all 128 929 0.738 0.694 0.746 0.522 0.759 0.625 0.682 0.426\n",
- " person 128 254 0.845 0.756 0.836 0.55 0.861 0.669 0.759 0.407\n",
- " bicycle 128 6 0.475 0.333 0.549 0.341 0.711 0.333 0.526 0.322\n",
- " car 128 46 0.612 0.565 0.539 0.257 0.555 0.435 0.477 0.171\n",
- " motorcycle 128 5 0.73 0.8 0.752 0.571 0.747 0.8 0.752 0.42\n",
- " airplane 128 6 1 0.943 0.995 0.732 0.92 0.833 0.839 0.555\n",
- " bus 128 7 0.677 0.714 0.722 0.653 0.711 0.714 0.722 0.593\n",
- " train 128 3 1 0.951 0.995 0.551 1 0.884 0.995 0.781\n",
- " truck 128 12 0.555 0.417 0.457 0.285 0.624 0.417 0.397 0.277\n",
- " boat 128 6 0.624 0.5 0.584 0.186 1 0.326 0.412 0.133\n",
- " traffic light 128 14 0.513 0.302 0.411 0.247 0.435 0.214 0.376 0.251\n",
- " stop sign 128 2 0.824 1 0.995 0.796 0.906 1 0.995 0.747\n",
- " bench 128 9 0.75 0.667 0.763 0.367 0.724 0.585 0.698 0.209\n",
- " bird 128 16 0.961 1 0.995 0.686 0.918 0.938 0.91 0.525\n",
- " cat 128 4 0.771 0.857 0.945 0.752 0.76 0.8 0.945 0.728\n",
- " dog 128 9 0.987 0.778 0.963 0.681 1 0.705 0.89 0.574\n",
- " horse 128 2 0.703 1 0.995 0.697 0.759 1 0.995 0.249\n",
- " elephant 128 17 0.916 0.882 0.93 0.691 0.811 0.765 0.829 0.537\n",
- " bear 128 1 0.664 1 0.995 0.995 0.701 1 0.995 0.895\n",
- " zebra 128 4 0.864 1 0.995 0.921 0.879 1 0.995 0.804\n",
- " giraffe 128 9 0.883 0.889 0.94 0.683 0.845 0.778 0.78 0.463\n",
- " backpack 128 6 1 0.59 0.701 0.372 1 0.474 0.52 0.252\n",
- " umbrella 128 18 0.654 0.839 0.887 0.52 0.517 0.556 0.427 0.229\n",
- " handbag 128 19 0.54 0.211 0.408 0.221 0.796 0.206 0.396 0.196\n",
- " tie 128 7 0.864 0.857 0.857 0.577 0.925 0.857 0.857 0.534\n",
- " suitcase 128 4 0.716 1 0.945 0.647 0.767 1 0.945 0.634\n",
- " frisbee 128 5 0.708 0.8 0.761 0.643 0.737 0.8 0.761 0.501\n",
- " skis 128 1 0.691 1 0.995 0.796 0.761 1 0.995 0.199\n",
- " snowboard 128 7 0.918 0.857 0.904 0.604 0.32 0.286 0.235 0.137\n",
- " sports ball 128 6 0.902 0.667 0.701 0.466 0.727 0.5 0.497 0.471\n",
- " kite 128 10 0.586 0.4 0.511 0.231 0.663 0.394 0.417 0.139\n",
- " baseball bat 128 4 0.359 0.5 0.401 0.169 0.631 0.5 0.526 0.133\n",
- " baseball glove 128 7 1 0.519 0.58 0.327 0.687 0.286 0.455 0.328\n",
- " skateboard 128 5 0.729 0.8 0.862 0.631 0.599 0.6 0.604 0.379\n",
- " tennis racket 128 7 0.57 0.714 0.645 0.448 0.608 0.714 0.645 0.412\n",
- " bottle 128 18 0.469 0.393 0.537 0.357 0.661 0.389 0.543 0.349\n",
- " wine glass 128 16 0.677 0.938 0.866 0.441 0.53 0.625 0.67 0.334\n",
- " cup 128 36 0.777 0.722 0.812 0.466 0.725 0.583 0.762 0.467\n",
- " fork 128 6 0.948 0.333 0.425 0.27 0.527 0.167 0.18 0.102\n",
- " knife 128 16 0.757 0.587 0.669 0.458 0.79 0.5 0.552 0.34\n",
- " spoon 128 22 0.74 0.364 0.559 0.269 0.925 0.364 0.513 0.213\n",
- " bowl 128 28 0.766 0.714 0.725 0.559 0.803 0.584 0.665 0.353\n",
- " banana 128 1 0.408 1 0.995 0.398 0.539 1 0.995 0.497\n",
- " sandwich 128 2 1 0 0.695 0.536 1 0 0.498 0.448\n",
- " orange 128 4 0.467 1 0.995 0.693 0.518 1 0.995 0.663\n",
- " broccoli 128 11 0.462 0.455 0.383 0.259 0.548 0.455 0.384 0.256\n",
- " carrot 128 24 0.631 0.875 0.77 0.533 0.757 0.909 0.853 0.499\n",
- " hot dog 128 2 0.555 1 0.995 0.995 0.578 1 0.995 0.796\n",
- " pizza 128 5 0.89 0.8 0.962 0.796 1 0.778 0.962 0.766\n",
- " donut 128 14 0.695 1 0.893 0.772 0.704 1 0.893 0.696\n",
- " cake 128 4 0.826 1 0.995 0.92 0.862 1 0.995 0.846\n",
- " chair 128 35 0.53 0.571 0.613 0.336 0.67 0.6 0.538 0.271\n",
- " couch 128 6 0.972 0.667 0.833 0.627 1 0.62 0.696 0.394\n",
- " potted plant 128 14 0.7 0.857 0.883 0.552 0.836 0.857 0.883 0.473\n",
- " bed 128 3 0.979 0.667 0.83 0.366 1 0 0.83 0.373\n",
- " dining table 128 13 0.775 0.308 0.505 0.364 0.644 0.231 0.25 0.0804\n",
- " toilet 128 2 0.836 1 0.995 0.846 0.887 1 0.995 0.797\n",
- " tv 128 2 0.6 1 0.995 0.846 0.655 1 0.995 0.896\n",
- " laptop 128 3 0.822 0.333 0.445 0.307 1 0 0.392 0.12\n",
- " mouse 128 2 1 0 0 0 1 0 0 0\n",
- " remote 128 8 0.745 0.5 0.62 0.459 0.821 0.5 0.624 0.449\n",
- " cell phone 128 8 0.686 0.375 0.502 0.272 0.488 0.25 0.28 0.132\n",
- " microwave 128 3 0.831 1 0.995 0.722 0.867 1 0.995 0.592\n",
- " oven 128 5 0.439 0.4 0.435 0.294 0.823 0.6 0.645 0.418\n",
- " sink 128 6 0.677 0.5 0.565 0.448 0.722 0.5 0.46 0.362\n",
- " refrigerator 128 5 0.533 0.8 0.783 0.524 0.558 0.8 0.783 0.527\n",
- " book 128 29 0.732 0.379 0.423 0.196 0.69 0.207 0.38 0.131\n",
- " clock 128 9 0.889 0.778 0.917 0.677 0.908 0.778 0.875 0.604\n",
- " vase 128 2 0.375 1 0.995 0.995 0.455 1 0.995 0.796\n",
- " scissors 128 1 1 0 0.0166 0.00166 1 0 0 0\n",
- " teddy bear 128 21 0.813 0.829 0.841 0.457 0.826 0.678 0.786 0.422\n",
- " toothbrush 128 5 0.806 1 0.995 0.733 0.991 1 0.995 0.628\n",
- "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n"
- ]
- }
- ],
- "source": [
-    "# Train YOLOv5s-seg on COCO128-seg for 3 epochs\n",
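-    "# From-scratch variant noted in the markdown above (a sketch, assuming the same dataset; not executed here):\n",
-    "# !python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml\n",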
- "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "15glLzbQx5u0"
- },
- "source": [
- "# 4. Visualize"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nWOsI5wJR1o3"
- },
- "source": [
- "## Comet Logging and Visualization 🌟 NEW\n",
- "\n",
- "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
- "\n",
- "Getting started is easy:\n",
- "```shell\n",
- "pip install comet_ml # 1. install\n",
- "export COMET_API_KEY= # 2. paste API key\n",
- "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
- "```\n",
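-    "\n",
-    "The same setup can be done from Python inside this notebook (a minimal sketch mirroring the logger cell above; `comet_ml.init()` prompts for the API key if `COMET_API_KEY` is unset):\n",
-    "\n",
-    "```python\n",
-    "%pip install -q comet_ml\n",
-    "import comet_ml\n",
-    "\n",
-    "comet_ml.init()  # reads COMET_API_KEY or prompts for it\n",
-    "```\n",
-    "\n",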
-    "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). For Comet itself, head over to the [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
-    "[Open In Colab](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
-    "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Lay2WsTjNJzP"
- },
- "source": [
- "## ClearML Logging and Automation 🌟 NEW\n",
- "\n",
- "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
- "\n",
- "- `pip install clearml`\n",
- "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
- "\n",
-    "You'll get all the expected features of an experiment manager: live updates, model upload, experiment comparison, and more. ClearML also tracks uncommitted changes and installed packages, which makes ClearML Tasks (its term for experiments) reproducible on different machines. With only one extra line, you can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
- "\n",
-    "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply by using its unique ID. This helps you keep track of your data without extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
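-    "\n",
-    "For example (a hedged sketch; `YOUR_DATASET_ID` is a placeholder for a dataset version ID from your own ClearML workspace):\n",
-    "\n",
-    "```python\n",
-    "# train against a ClearML-versioned dataset by its unique ID\n",
-    "!python segment/train.py --img 640 --data clearml://YOUR_DATASET_ID --weights yolov5s-seg.pt\n",
-    "```\n",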
-    "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-WPvRbS5Swl6"
- },
- "source": [
- "## Local Logging\n",
- "\n",
-    "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train-seg`, with a new experiment directory created for each new training as `runs/train-seg/exp2`, `runs/train-seg/exp3`, etc.\n",
- "\n",
-    "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
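-    "\n",
-    "For example, per-epoch metrics can be read back from the CSV log (a minimal sketch, assuming a first segmentation run at `runs/train-seg/exp`):\n",
-    "\n",
-    "```python\n",
-    "import pandas as pd\n",
-    "\n",
-    "df = pd.read_csv(\"runs/train-seg/exp/results.csv\")\n",
-    "df.columns = df.columns.str.strip()  # YOLOv5 pads CSV column names with spaces\n",
-    "print(df.tail())  # last few epochs of losses and mAP metrics\n",
-    "```\n",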
-    "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Zelyeqbyt3GD"
- },
- "source": [
- "# Environments\n",
- "\n",
- "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
- "\n",
-    "- **Notebooks** with free GPU\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
- "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
-    "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6Qu7Iesl0p54"
- },
- "source": [
- "# Status\n",
- "\n",
-    "[![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml)\n",
- "\n",
- "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "IEijrePND_2I"
- },
- "source": [
- "# Appendix\n",
- "\n",
- "Additional content below."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "GMusP4OAxFu6"
- },
- "outputs": [],
- "source": [
- "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
-    "import torch\n",
-    "\n",
- "model = torch.hub.load(\n",
- " \"ultralytics/yolov5\", \"yolov5s-seg\", force_reload=True, trust_repo=True\n",
- ") # or yolov5n - yolov5x6 or custom\n",
- "im = \"https://ultralytics.com/images/zidane.jpg\" # file, Path, PIL.Image, OpenCV, nparray, list\n",
- "results = model(im) # inference\n",
- "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "name": "YOLOv5 Segmentation Tutorial",
- "provenance": [],
- "toc_visible": true
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/yolov5/segment/val.py b/yolov5/segment/val.py
deleted file mode 100644
index 29ca803..0000000
--- a/yolov5/segment/val.py
+++ /dev/null
@@ -1,522 +0,0 @@
-# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-"""
-Validate a trained YOLOv5 segment model on a segment dataset.
-
-Usage:
- $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images)
- $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments
-
-Usage - formats:
- $ python segment/val.py --weights yolov5s-seg.pt # PyTorch
- yolov5s-seg.torchscript # TorchScript
- yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s-seg_openvino_model # OpenVINO
- yolov5s-seg.engine # TensorRT
- yolov5s-seg.mlmodel # CoreML (macOS-only)
- yolov5s-seg_saved_model # TensorFlow SavedModel
- yolov5s-seg.pb # TensorFlow GraphDef
- yolov5s-seg.tflite # TensorFlow Lite
- yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s-seg_paddle_model # PaddlePaddle
-"""
-
-import argparse
-import json
-import os
-import subprocess
-import sys
-from multiprocessing.pool import ThreadPool
-from pathlib import Path
-
-import numpy as np
-import torch
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-import torch.nn.functional as F
-
-from models.common import DetectMultiBackend
-from models.yolo import SegmentationModel
-from utils.callbacks import Callbacks
-from utils.general import (
- LOGGER,
- NUM_THREADS,
- TQDM_BAR_FORMAT,
- Profile,
- check_dataset,
- check_img_size,
- check_requirements,
- check_yaml,
- coco80_to_coco91_class,
- colorstr,
- increment_path,
- non_max_suppression,
- print_args,
- scale_boxes,
- xywh2xyxy,
- xyxy2xywh,
-)
-from utils.metrics import ConfusionMatrix, box_iou
-from utils.plots import output_to_target, plot_val_study
-from utils.segment.dataloaders import create_dataloader
-from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
-from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
-from utils.segment.plots import plot_images_and_masks
-from utils.torch_utils import de_parallel, select_device, smart_inference_mode
-
-
-def save_one_txt(predn, save_conf, shape, file):
- """Saves detection results in txt format; includes class, xywh (normalized), optionally confidence if `save_conf` is
- True.
- """
- gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
- for *xyxy, conf, cls in predn.tolist():
- xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
- line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
- with open(file, "a") as f:
- f.write(("%g " * len(line)).rstrip() % line + "\n")
-
-
-def save_one_json(predn, jdict, path, class_map, pred_masks):
- """
- Saves a JSON file with detection results including bounding boxes, category IDs, scores, and segmentation masks.
-
- Example JSON result: {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}.
- """
- from pycocotools.mask import encode
-
- def single_encode(x):
- """Encodes binary mask arrays into RLE (Run-Length Encoding) format for JSON serialization."""
- rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
- rle["counts"] = rle["counts"].decode("utf-8")
- return rle
-
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem
- box = xyxy2xywh(predn[:, :4]) # xywh
- box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
- pred_masks = np.transpose(pred_masks, (2, 0, 1))
- with ThreadPool(NUM_THREADS) as pool:
- rles = pool.map(single_encode, pred_masks)
- for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
- jdict.append(
- {
- "image_id": image_id,
- "category_id": class_map[int(p[5])],
- "bbox": [round(x, 3) for x in b],
- "score": round(p[4], 5),
- "segmentation": rles[i],
- }
- )
-
-
-def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
- """
- Return the correct-prediction matrix.
-
- Arguments:
- detections (array[N, 6]): x1, y1, x2, y2, conf, class
- labels (array[M, 5]): class, x1, y1, x2, y2
- Returns:
- correct (array[N, 10]): True where detection i matches a label at IoU threshold index j (thresholds 0.5:0.95).
- """
- if masks:
- if overlap:
- nl = len(labels)
- index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
- gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640)
- gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
- if gt_masks.shape[1:] != pred_masks.shape[1:]:
- gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
- gt_masks = gt_masks.gt_(0.5)
- iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
- else: # boxes
- iou = box_iou(labels[:, 1:], detections[:, :4])
-
- correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
- correct_class = labels[:, 0:1] == detections[:, 5]
- for i in range(len(iouv)):
- x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match
- if x[0].shape[0]:
- matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou]
- if x[0].shape[0] > 1:
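- # sort candidate matches by IoU (descending), then keep each detection and each label at most once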
- matches = matches[matches[:, 2].argsort()[::-1]]
- matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
- # matches = matches[matches[:, 2].argsort()[::-1]]
- matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
- correct[matches[:, 1].astype(int), i] = True
- return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
-
-
-@smart_inference_mode()
-def run(
- data,
- weights=None, # model.pt path(s)
- batch_size=32, # batch size
- imgsz=640, # inference size (pixels)
- conf_thres=0.001, # confidence threshold
- iou_thres=0.6, # NMS IoU threshold
- max_det=300, # maximum detections per image
- task="val", # train, val, test, speed or study
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
- workers=8, # max dataloader workers (per RANK in DDP mode)
- single_cls=False, # treat as single-class dataset
- augment=False, # augmented inference
- verbose=False, # verbose output
- save_txt=False, # save results to *.txt
- save_hybrid=False, # save label+prediction hybrid results to *.txt
- save_conf=False, # save confidences in --save-txt labels
- save_json=False, # save a COCO-JSON results file
- project=ROOT / "runs/val-seg", # save to project/name
- name="exp", # save to project/name
- exist_ok=False, # existing project/name ok, do not increment
- half=True, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- model=None,
- dataloader=None,
- save_dir=Path(""),
- plots=True,
- overlap=False,
- mask_downsample_ratio=1,
- compute_loss=None,
- callbacks=Callbacks(),
-):
- """Validates a YOLOv5 segmentation model on a specified dataset, producing metrics, plots, and optional JSON
- output.
- """
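- # Example programmatic call (a sketch mirroring the CLI defaults above):
- #   results, maps, times = run(data="data/coco128-seg.yaml", weights="yolov5s-seg.pt", half=True)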
- if save_json:
- check_requirements("pycocotools>=2.0.6")
- process = process_mask_native # more accurate
- else:
- process = process_mask # faster
-
- # Initialize/load model and set device
- training = model is not None
- if training: # called by train.py
- device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
- half &= device.type != "cpu" # half precision only supported on CUDA
- model.half() if half else model.float()
- nm = de_parallel(model).model[-1].nm # number of masks
- else: # called directly
- device = select_device(device, batch_size=batch_size)
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
- model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
- stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
- imgsz = check_img_size(imgsz, s=stride) # check image size
- half = model.fp16 # FP16 supported on limited backends with CUDA
- nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks
- if engine:
- batch_size = model.batch_size
- else:
- device = model.device
- if not (pt or jit):
- batch_size = 1 # export.py models default to batch-size 1
- LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
-
- # Data
- data = check_dataset(data) # check
-
- # Configure
- model.eval()
- cuda = device.type != "cpu"
- is_coco = isinstance(data.get("val"), str) and data["val"].endswith(f"coco{os.sep}val2017.txt") # COCO dataset
- nc = 1 if single_cls else int(data["nc"]) # number of classes
- iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95
- niou = iouv.numel()
-
- # Dataloader
- if not training:
- if pt and not single_cls: # check --weights are trained on --data
- ncm = model.model.nc
- assert ncm == nc, (
- f"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} "
- f"classes). Pass correct combination of --weights and --data that are trained together."
- )
- model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup
- pad, rect = (0.0, False) if task == "speed" else (0.5, pt) # square inference for benchmarks
- task = task if task in ("train", "val", "test") else "val" # path to train/val/test images
- dataloader = create_dataloader(
- data[task],
- imgsz,
- batch_size,
- stride,
- single_cls,
- pad=pad,
- rect=rect,
- workers=workers,
- prefix=colorstr(f"{task}: "),
- overlap_mask=overlap,
- mask_downsample_ratio=mask_downsample_ratio,
- )[0]
-
- seen = 0
- confusion_matrix = ConfusionMatrix(nc=nc)
- names = model.names if hasattr(model, "names") else model.module.names # get class names
- if isinstance(names, (list, tuple)): # old format
- names = dict(enumerate(names))
- class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
- s = ("%22s" + "%11s" * 10) % (
- "Class",
- "Images",
- "Instances",
- "Box(P",
- "R",
- "mAP50",
- "mAP50-95)",
- "Mask(P",
- "R",
- "mAP50",
- "mAP50-95)",
- )
- dt = Profile(device=device), Profile(device=device), Profile(device=device)
- metrics = Metrics()
- loss = torch.zeros(4, device=device)
- jdict, stats = [], []
- # callbacks.run('on_val_start')
- pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar
- for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
- # callbacks.run('on_val_batch_start')
- with dt[0]:
- if cuda:
- im = im.to(device, non_blocking=True)
- targets = targets.to(device)
- masks = masks.to(device)
- masks = masks.float()
- im = im.half() if half else im.float() # uint8 to fp16/32
- im /= 255 # 0 - 255 to 0.0 - 1.0
- nb, _, height, width = im.shape # batch size, channels, height, width
-
- # Inference
- with dt[1]:
- preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)
-
- # Loss
- if compute_loss:
- loss += compute_loss((train_out, protos), targets, masks)[1] # box, seg, obj, cls
-
- # NMS
- targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
- lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
- with dt[2]:
- preds = non_max_suppression(
- preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm
- )
-
- # Metrics
- plot_masks = [] # masks for plotting
- for si, (pred, proto) in enumerate(zip(preds, protos)):
- labels = targets[targets[:, 0] == si, 1:]
- nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
- path, shape = Path(paths[si]), shapes[si][0]
- correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
- correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
- seen += 1
-
- if npr == 0:
- if nl:
- stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
- if plots:
- confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
- continue
-
- # Masks
- midx = [si] if overlap else targets[:, 0] == si
- gt_masks = masks[midx]
- pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])
-
- # Predictions
- if single_cls:
- pred[:, 5] = 0
- predn = pred.clone()
- scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
-
- # Evaluate
- if nl:
- tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
- scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
- labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
- correct_bboxes = process_batch(predn, labelsn, iouv)
- correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
- if plots:
- confusion_matrix.process_batch(predn, labelsn)
- stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls)
-
- pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
- if plots and batch_i < 3:
- plot_masks.append(pred_masks[:15]) # filter top 15 to plot
-
- # Save/log
- if save_txt:
- save_one_txt(predn, save_conf, shape, file=save_dir / "labels" / f"{path.stem}.txt")
- if save_json:
- pred_masks = scale_image(
- im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]
- )
- save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary
- # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
-
- # Plot images
- if plots and batch_i < 3:
- if len(plot_masks):
- plot_masks = torch.cat(plot_masks, dim=0)
- plot_images_and_masks(im, targets, masks, paths, save_dir / f"val_batch{batch_i}_labels.jpg", names)
- plot_images_and_masks(
- im,
- output_to_target(preds, max_det=15),
- plot_masks,
- paths,
- save_dir / f"val_batch{batch_i}_pred.jpg",
- names,
- ) # pred
-
- # callbacks.run('on_val_batch_end')
-
- # Compute metrics
- stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
- if len(stats) and stats[0].any():
- results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
- metrics.update(results)
- nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class
-
- # Print results
- pf = "%22s" + "%11i" * 2 + "%11.3g" * 8 # print format
- LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results()))
- if nt.sum() == 0:
- LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, cannot compute metrics without labels")
-
- # Print results per class
- if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
- for i, c in enumerate(metrics.ap_class_index):
- LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
-
- # Print speeds
- t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
- if not training:
- shape = (batch_size, 3, imgsz, imgsz)
- LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t)
-
- # Plots
- if plots:
- confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
- # callbacks.run('on_val_end')
-
- mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
-
- # Save JSON
- if save_json and len(jdict):
- w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" # weights
- anno_json = str(Path("../../datasets/coco/annotations/instances_val2017.json")) # annotations
- pred_json = str(save_dir / f"{w}_predictions.json") # predictions
- LOGGER.info(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
- with open(pred_json, "w") as f:
- json.dump(jdict, f)
-
- try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
- from pycocotools.coco import COCO
- from pycocotools.cocoeval import COCOeval
-
- anno = COCO(anno_json) # init annotations api
- pred = anno.loadRes(pred_json) # init predictions api
- results = []
- for eval in COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm"):
- if is_coco:
- eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
- results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5)
- map_bbox, map50_bbox, map_mask, map50_mask = results
- except Exception as e:
- LOGGER.info(f"pycocotools unable to run: {e}")
-
- # Return results
- model.float() # for training
- if not training:
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
- final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
- return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t
-
-
-def parse_opt():
- """Parses command line arguments for configuring YOLOv5 options like dataset path, weights, batch size, and
- inference settings.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument("--data", type=str, default=ROOT / "data/coco128-seg.yaml", help="dataset.yaml path")
- parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)")
- parser.add_argument("--batch-size", type=int, default=32, help="batch size")
- parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
- parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold")
- parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold")
- parser.add_argument("--max-det", type=int, default=300, help="maximum detections per image")
- parser.add_argument("--task", default="val", help="train, val, test, speed or study")
- parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
- parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
- parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset")
- parser.add_argument("--augment", action="store_true", help="augmented inference")
- parser.add_argument("--verbose", action="store_true", help="report mAP by class")
- parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
- parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt")
- parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
- parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file")
- parser.add_argument("--project", default=ROOT / "runs/val-seg", help="save results to project/name")
- parser.add_argument("--name", default="exp", help="save to project/name")
- parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
- parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
- parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
- opt = parser.parse_args()
- opt.data = check_yaml(opt.data) # check YAML
- # opt.save_json |= opt.data.endswith('coco.yaml')
- opt.save_txt |= opt.save_hybrid
- print_args(vars(opt))
- return opt
-
-
-def main(opt):
- """Executes YOLOv5 tasks including training, validation, testing, speed, and study with configurable options."""
- check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
-
- if opt.task in ("train", "val", "test"): # run normally
- if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
- LOGGER.warning(f"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results")
- if opt.save_hybrid:
- LOGGER.warning("WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone")
- run(**vars(opt))
-
- else:
- weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
- opt.half = torch.cuda.is_available() and opt.device != "cpu" # FP16 for fastest results
- if opt.task == "speed": # speed benchmarks
- # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
- opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
- for opt.weights in weights:
- run(**vars(opt), plots=False)
-
- elif opt.task == "study": # speed vs mAP benchmarks
- # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
- for opt.weights in weights:
- f = f"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt" # filename to save to
- x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
- for opt.imgsz in x: # img-size
- LOGGER.info(f"\nRunning {f} --imgsz {opt.imgsz}...")
- r, _, t = run(**vars(opt), plots=False)
- y.append(r + t) # results and times
- np.savetxt(f, y, fmt="%10.4g") # save
- subprocess.run(["zip", "-r", "study.zip", "study_*.txt"])
- plot_val_study(x=x) # plot
- else:
- raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)