diff --git a/app/yolov5/train_server.py b/app/yolov5/train_server.py
index 0146c60..0d50d5f 100644
--- a/app/yolov5/train_server.py
+++ b/app/yolov5/train_server.py
@@ -217,16 +217,16 @@ def train(hyp, opt, device, data_list,id,callbacks):  # hyp is path/to/hyp.yaml
     best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
     del ckpt, csd
 
-    # DP mode
-    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
-                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
-        model = torch.nn.DataParallel(model)
+    # # DP mode
+    # if cuda and RANK == -1 and torch.cuda.device_count() > 1:
+    #     LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+    #                    'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
+    #     model = torch.nn.DataParallel(model)
 
-    # SyncBatchNorm
-    if opt.sync_bn and cuda and RANK != -1:
-        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-        LOGGER.info('Using SyncBatchNorm()')
+    # # SyncBatchNorm
+    # if opt.sync_bn and cuda and RANK != -1:
+    #     model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+    #     LOGGER.info('Using SyncBatchNorm()')
 
     print("Trainloader")
     # Trainloader