Improve project structure

2025-03-14 15:44:41 +08:00
parent 8478b4a102
commit 9e79fb6a6d
95 changed files with 13745 additions and 64 deletions

yolov5/.gitignore vendored

@@ -24,7 +24,7 @@
 !cfg/yolov3*.cfg

 storage.googleapis.com
-../runs/*
+runs/*
 data/*
 data/images/*
 !data/*.yaml

yolov5/models/common.py

@@ -35,9 +35,9 @@ except (ImportError, AssertionError):
     from ultralytics.utils.plotting import Annotator, colors, save_one_box

-from yolov5.utils import TryExcept
-from yolov5.utils.dataloaders import exif_transpose, letterbox
-from yolov5.utils.general import (
+from utils import TryExcept
+from utils.dataloaders import exif_transpose, letterbox
+from utils.general import (
     LOGGER,
     ROOT,
     Profile,
@@ -54,7 +54,7 @@ from yolov5.utils.general import (
     xyxy2xywh,
     yaml_load,
 )
-from yolov5.utils.torch_utils import copy_attr, smart_inference_mode
+from utils.torch_utils import copy_attr, smart_inference_mode


 def autopad(k, p=None, d=1):
@@ -473,7 +473,7 @@ class DetectMultiBackend(nn.Module):
         #   TensorFlow Lite:     *.tflite
         #   TensorFlow Edge TPU: *_edgetpu.tflite
         #   PaddlePaddle:        *_paddle_model
-        from yolov5.models.experimental import attempt_download, attempt_load  # scoped to avoid circular import
+        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
@@ -661,7 +661,7 @@ class DetectMultiBackend(nn.Module):
         elif triton:  # NVIDIA Triton Inference Server
             LOGGER.info(f"Using {w} as Triton Inference Server...")
             check_requirements("tritonclient[all]")
-            from yolov5.utils.triton import TritonRemoteModel
+            from utils.triton import TritonRemoteModel

             model = TritonRemoteModel(url=w)
             nhwc = model.runtime.startswith("tensorflow")
@@ -780,8 +780,8 @@ class DetectMultiBackend(nn.Module):
         Example: path='path/to/model.onnx' -> type=onnx
         """
         # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
-        from yolov5.export import export_formats
-        from yolov5.utils.downloads import is_url
+        from export import export_formats
+        from utils.downloads import is_url

         sf = list(export_formats().Suffix)  # export suffixes
         if not is_url(p, check=False):
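
A note on these import changes: with the `yolov5.` prefix dropped, the flat imports (`from utils ...`, `from models ...`) resolve only when the yolov5/ directory itself is on the import path, e.g. when scripts are run from inside it. A minimal sketch of what a caller outside that directory would need; the path constant is illustrative, not part of this commit:

    import sys
    from pathlib import Path

    # Assumed layout: this script sits next to the yolov5/ directory.
    YOLOV5_ROOT = Path(__file__).resolve().parent / "yolov5"
    if str(YOLOV5_ROOT) not in sys.path:
        sys.path.insert(0, str(YOLOV5_ROOT))  # make the flat imports resolvable

    from models.common import DetectMultiBackend  # now imports cleanly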

yolov5/models/experimental.py

@@ -7,7 +7,7 @@ import numpy as np
 import torch
 import torch.nn as nn

-from yolov5.utils.downloads import attempt_download
+from utils.downloads import attempt_download


 class Sum(nn.Module):
@@ -91,7 +91,7 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
     Example inputs: weights=[a,b,c] or a single model weights=[a] or weights=a.
     """
-    from yolov5.models.yolo import Detect, Model
+    from models.yolo import Detect, Model

     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
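
For context, `attempt_load` (patched above) accepts a single weights path or a list of paths and returns a single model or an `Ensemble`. A hedged usage sketch based on the signature shown in this hunk; the checkpoint filenames are placeholders:

    import torch
    from models.experimental import attempt_load

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = attempt_load("yolov5s.pt", device=device)  # single checkpoint
    ensemble = attempt_load(["yolov5s.pt", "yolov5m.pt"], device=device)  # Ensemble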

yolov5/val.py

@@ -413,7 +413,7 @@ def run(
     # Save JSON
     if save_json and len(jdict):
         w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ""  # weights
-        anno_json = str(Path("../../datasets/coco/annotations/instances_val2017.json"))  # annotations
+        anno_json = str(Path("../datasets/coco/annotations/instances_val2017.json"))  # annotations
         pred_json = str(save_dir / f"{w}_predictions.json")  # predictions
         LOGGER.info(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
         with open(pred_json, "w") as f:
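
The `anno_json`/`pred_json` pair above feeds the pycocotools evaluation that follows in val.py. A minimal sketch of that evaluation step, assuming pycocotools is installed and both JSON files exist:

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    anno = COCO(anno_json)  # COCO ground-truth annotations
    pred = anno.loadRes(pred_json)  # predictions saved above
    evaluator = COCOeval(anno, pred, "bbox")
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()  # prints mAP@0.5:0.95, mAP@0.5, etc.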

yolov5/train.py

@@ -409,7 +409,7 @@ def train(hyp, opt, device, callbacks):
                     imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

             # Forward
-            with torch.amp.autocast(device_type='cuda', enabled=amp):
+            with torch.cuda.amp.autocast(amp):
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:
@@ -566,7 +566,7 @@ def parse_opt(known=False):
     parser = argparse.ArgumentParser()
     parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
     parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
-    parser.add_argument("--data", type=str, default=ROOT / "data/5t5.yaml", help="dataset.yaml path")
+    parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
     parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
     parser.add_argument("--epochs", type=int, default=100, help="total training epochs")
     parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")

yolov5/utils/augmentations.py

@@ -10,8 +10,8 @@ import torch
 import torchvision.transforms as T
 import torchvision.transforms.functional as TF

-from yolov5.utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
-from yolov5.utils.metrics import bbox_ioa
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
+from utils.metrics import bbox_ioa

 IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
 IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
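
`IMAGENET_MEAN` and `IMAGENET_STD` above are the standard ImageNet per-channel normalization constants used by this module's classification transforms. A standalone illustration of how such constants are applied (not code from this file):

    import torch
    import torchvision.transforms as T

    IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
    IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation

    normalize = T.Normalize(IMAGENET_MEAN, IMAGENET_STD)
    img = torch.rand(3, 224, 224)  # dummy CHW image in [0, 1]
    img = normalize(img)  # per-channel (x - mean) / std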

yolov5/utils/dataloaders.py

@@ -26,7 +26,7 @@ from PIL import ExifTags, Image, ImageOps
 from torch.utils.data import DataLoader, Dataset, dataloader, distributed
 from tqdm import tqdm

-from yolov5.utils.augmentations import (
+from utils.augmentations import (
     Albumentations,
     augment_hsv,
     classify_albumentations,
@@ -36,7 +36,7 @@ from yolov5.utils.augmentations import (
     mixup,
     random_perspective,
 )
-from yolov5.utils.general import (
+from utils.general import (
     DATASETS_DIR,
     LOGGER,
     NUM_THREADS,
@@ -55,7 +55,7 @@ from yolov5.utils.general import (
     xywhn2xyxy,
     xyxy2xywhn,
 )
-from yolov5.utils.torch_utils import torch_distributed_zero_first
+from utils.torch_utils import torch_distributed_zero_first

 # Parameters
 HELP_URL = "See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data"
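
`torch_distributed_zero_first`, re-imported above, is the context manager the dataloader factory uses so that in DDP training local rank 0 downloads/caches the dataset before the other ranks read it. A hedged sketch of the usage pattern; `build_dataset` is a placeholder for the real dataset constructor:

    from utils.torch_utils import torch_distributed_zero_first

    LOCAL_RANK = -1  # set from the LOCAL_RANK env var under torchrun

    # Non-zero ranks wait here; rank 0 enters first and builds the cache.
    with torch_distributed_zero_first(LOCAL_RANK):
        dataset = build_dataset()  # placeholder for LoadImagesAndLabels(...)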

yolov5/utils/general.py

@@ -45,9 +45,9 @@ except (ImportError, AssertionError):
     from ultralytics.utils.checks import check_requirements

-from yolov5.utils import TryExcept, emojis
-from yolov5.utils.downloads import curl_download, gsutil_getsize
-from yolov5.utils.metrics import box_iou, fitness
+from utils import TryExcept, emojis
+from utils.downloads import curl_download, gsutil_getsize
+from utils.metrics import box_iou, fitness

 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory
@@ -585,7 +585,7 @@ def check_dataset(data, autodownload=True):

 def check_amp(model):
     """Checks PyTorch AMP functionality for a model, returns True if AMP operates correctly, otherwise False."""
-    from yolov5.models.common import AutoShape, DetectMultiBackend
+    from models.common import AutoShape, DetectMultiBackend

     def amp_allclose(model, im):
         """Compares FP32 and AMP model inference outputs, ensuring they are close within a 10% absolute tolerance."""
@@ -611,27 +611,6 @@ def check_amp(model):
         return False


-def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
-    # Rescale coords (xyxy) from img1_shape to img0_shape
-    if ratio_pad is None:  # calculate from img0_shape
-        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
-        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
-    else:
-        gain = ratio_pad[0][0]
-        pad = ratio_pad[1]
-
-    coords[:, [0, 2]] -= pad[0]  # x padding
-    coords[:, [1, 3]] -= pad[1]  # y padding
-    coords[:, :4] /= gain
-
-    # Clip xyxy bounding boxes to image shape (height, width)
-    coords[:, 0].clamp_(0, img0_shape[1])  # x1
-    coords[:, 1].clamp_(0, img0_shape[0])  # y1
-    coords[:, 2].clamp_(0, img0_shape[1])  # x2
-    coords[:, 3].clamp_(0, img0_shape[0])  # y2
-    return coords
-
-
 def yaml_load(file="data.yaml"):
     """Safely loads and returns the contents of a YAML file specified by `file` argument."""
     with open(file, errors="ignore") as f:
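
The deleted `scale_coords` duplicates logic that exists upstream as `scale_boxes` in this same module (the body is identical, with `coords` renamed to `boxes`), which is presumably why the hunk drops it. A sketch of switching a caller over, with made-up shapes and boxes:

    import torch
    from utils.general import scale_boxes  # upstream replacement for scale_coords

    img1_shape = (640, 640)  # letterboxed inference size (h, w)
    img0_shape = (480, 720)  # original image size (h, w)
    boxes = torch.tensor([[100.0, 120.0, 300.0, 360.0]])  # xyxy on the 640x640 image
    boxes = scale_boxes(img1_shape, boxes, img0_shape)  # mapped back to 480x720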

yolov5/utils/metrics.py

@@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import torch

-from yolov5.utils import TryExcept, threaded
+from utils import TryExcept, threaded


 def fitness(x):
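
For reference, `fitness` is the scalar used to select best.pt during training: in the upstream implementation it is a weighted sum over [P, R, mAP@0.5, mAP@0.5:0.95] with weights [0.0, 0.0, 0.1, 0.9]. A small numeric sketch of that weighting:

    import numpy as np

    w = [0.0, 0.0, 0.1, 0.9]  # P, R, mAP@0.5, mAP@0.5:0.95
    x = np.array([[0.70, 0.60, 0.55, 0.35]])  # one row of validation results
    print((x[:, :4] * w).sum(1))  # [0.37] = 0.1 * 0.55 + 0.9 * 0.35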

yolov5/utils/plots.py

@@ -18,9 +18,9 @@ from PIL import Image, ImageDraw
 from scipy.ndimage.filters import gaussian_filter1d
 from ultralytics.utils.plotting import Annotator

-from yolov5.utils import TryExcept, threaded
-from yolov5.utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
-from yolov5.utils.metrics import fitness
+from utils import TryExcept, threaded
+from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
+from utils.metrics import fitness

 # Settings
 RANK = int(os.getenv("RANK", -1))
@@ -372,7 +372,7 @@ def plot_labels(labels, names=(), save_dir=Path("")):

 def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path("images.jpg")):
     """Displays a grid of images with optional labels and predictions, saving to a file."""
-    from yolov5.utils.augmentations import denormalize
+    from utils.augmentations import denormalize

     names = names or [f"class{i}" for i in range(1000)]
     blocks = torch.chunk(
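
`denormalize`, imported inside `imshow_cls` above, undoes the ImageNet normalization so classification images plot with natural colors. A minimal standalone sketch of that inverse transform, assuming BCHW tensors and the mean/std constants from utils/augmentations.py:

    import torch

    IMAGENET_MEAN = 0.485, 0.456, 0.406
    IMAGENET_STD = 0.229, 0.224, 0.225

    def denormalize_sketch(x: torch.Tensor) -> torch.Tensor:
        # Inverse of T.Normalize: x * std + mean, per RGB channel
        for i in range(3):
            x[:, i] = x[:, i] * IMAGENET_STD[i] + IMAGENET_MEAN[i]
        return x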

yolov5/utils/torch_utils.py

@@ -17,7 +17,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn.parallel import DistributedDataParallel as DDP

-from yolov5.utils.general import LOGGER, check_version, colorstr, file_date, git_describe
+from utils.general import LOGGER, check_version, colorstr, file_date, git_describe

 LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv("RANK", -1))
@@ -68,7 +68,7 @@ def smart_DDP(model):

 def reshape_classifier_output(model, n=1000):
     """Reshapes last layer of model to match class count 'n', supporting Classify, Linear, Sequential types."""
-    from yolov5.models.common import Classify
+    from models.common import Classify

     name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1]  # last module
     if isinstance(m, Classify):  # YOLOv5 Classify() head
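
`reshape_classifier_output` swaps a model's final layer so its output width matches a new class count. A minimal sketch of the core idea, assuming the last child module is a plain nn.Linear head (the real helper also handles yolov5's Classify() and nn.Sequential heads):

    import torch.nn as nn

    def reshape_linear_head(model: nn.Module, n: int = 1000) -> None:
        name, m = list(model.named_children())[-1]  # last child module
        if isinstance(m, nn.Linear) and m.out_features != n:
            setattr(model, name, nn.Linear(m.in_features, n))  # new class count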