Add some tracking logic code

2025-03-24 17:42:10 +08:00
parent bba39adcfc
commit f32cd5b9a2
47 changed files with 3395 additions and 17 deletions


@@ -891,7 +891,7 @@ class AutoShape(nn.Module):
             x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
 
-        with amp.autocast(autocast):
+        with torch.amp.autocast(device_type='cuda', enabled=autocast):
             # Inference
             with dt[1]:
                 y = self.model(x, augment=augment)  # forward
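The change above swaps the deprecated amp.autocast(autocast) call (from torch.cuda.amp) for torch.amp.autocast, which takes the device type explicitly. A minimal, self-contained sketch of the new-style call follows, assuming a recent PyTorch release that provides torch.amp.autocast; the toy model, input tensor, and device selection are hypothetical placeholders, not code from this repository.

import torch

# Hypothetical stand-ins for the model and batch handled in AutoShape.forward.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.nn.Linear(8, 4).to(device)
x = torch.randn(2, 8, device=device)

# New-style autocast: the device type is explicit, and enabled=False makes it a no-op.
with torch.amp.autocast(device_type=device, enabled=(device == 'cuda')):
    y = model(x)  # reduced precision on CUDA when enabled, plain fp32 otherwise

print(y.dtype)  # torch.float16 on an AMP-enabled GPU, torch.float32 on CPU

Because a disabled autocast context does nothing, the same forward path serves both AMP and full-precision inference.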


@@ -352,7 +352,7 @@ def train(hyp, opt, device, callbacks):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    scaler = torch.amp.GradScaler(device='cuda', enabled=amp)
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model)  # init loss class
     callbacks.run("on_train_start")
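The second change follows the same migration: torch.cuda.amp.GradScaler(enabled=amp) is deprecated in favor of torch.amp.GradScaler, which takes the device as an argument. Below is a minimal single-step sketch under that assumption; the linear model, SGD optimizer, and random batch are hypothetical placeholders rather than the objects built earlier in train().

import torch

use_amp = torch.cuda.is_available()
device = 'cuda' if use_amp else 'cpu'
model = torch.nn.Linear(8, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = torch.amp.GradScaler(device='cuda', enabled=use_amp)  # new-style constructor

x = torch.randn(4, 8, device=device)
target = torch.randn(4, 1, device=device)

with torch.amp.autocast(device_type=device, enabled=use_amp):
    loss = torch.nn.functional.mse_loss(model(x), target)

scaler.scale(loss).backward()  # scale the loss so fp16 gradients do not underflow
scaler.step(optimizer)         # unscale gradients; skip the step if any are inf/nan
scaler.update()                # adjust the scale factor for the next iteration
optimizer.zero_grad()

When enabled is False, every scaler call degenerates to a pass-through, so the training loop keeps a single code path for CPU and GPU runs.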