|
| 1 | +from activelearning.base_model import BaseModel |
| 2 | +from detect import detect |
| 3 | +import argparse |
| 4 | +import logging |
| 5 | +import os |
| 6 | +import random |
| 7 | +import time |
| 8 | +from pathlib import Path |
| 9 | +from train import train |
| 10 | +from warnings import warn |
| 11 | +import numpy as np |
| | +import torch
| 12 | +import torch.distributed as dist
| 13 | +import torch.utils.data |
| 14 | +import yaml |
| 15 | +from torch.utils.tensorboard import SummaryWriter |
| 16 | +from utils.general import increment_path, fitness, get_latest_run, check_file, check_git_status, print_mutation, set_logging, strip_optimizer |
| 17 | +from utils.plots import plot_evolution |
| 18 | +from utils.torch_utils import select_device |
| 19 | + |
| 20 | +logger = logging.getLogger(__name__) |
| 21 | + |
| 22 | +try: |
| 23 | + import wandb |
| 24 | +except ImportError: |
| 25 | + wandb = None |
| 26 | + logger.info( |
| 27 | + "Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)") |
| 28 | + |
| 29 | + |
| 30 | +class Yolov5(BaseModel): |
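| | + """YOLOv5 wrapper exposing the upstream train and detect entry points through the active-learning BaseModel interface."""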
| 31 | + model_type = 'Yolo version 5' |
| 32 | + |
| 33 | + def train(self): |
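| | + """Parse YOLOv5 training options, then either run a standard training session or evolve hyperparameters when --evolve is set."""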
| 34 | + parser = argparse.ArgumentParser() |
| 35 | + parser.add_argument('--weights', type=str, default='activelearning/yolov5s.pt', help='initial weights path') |
| 36 | + parser.add_argument('--cfg', type=str, default='models/yolo_gun.yaml', help='model config *.yaml path')
| 37 | + parser.add_argument('--data', type=str, default='data/gun.yaml', help='data.yaml path') |
| 38 | + parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') |
| 39 | + parser.add_argument('--epochs', type=int, default=1) |
| 40 | + parser.add_argument('--batch-size', type=int, default=8, help='total batch size for all GPUs') |
| 41 | + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') |
| 42 | + parser.add_argument('--rect', action='store_true', help='rectangular training') |
| 43 | + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') |
| 44 | + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') |
| 45 | + parser.add_argument('--notest', action='store_true', help='only test final epoch') |
| 46 | + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') |
| 47 | + parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') |
| 48 | + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') |
| 49 | + parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') |
| 50 | + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') |
| 51 | + parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') # using GPU 0 |
| 52 | + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') |
| 53 | + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') |
| 54 | + # parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') |
| 55 | + parser.add_argument('--adam', type=int, default=1, help='use torch.optim.Adam() optimizer') # using Adam optimizer |
| 56 | + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') |
| 57 | + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') |
| 58 | + parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') |
| 59 | + parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model') |
| 60 | + parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') |
| 61 | + parser.add_argument('--project', default='runs/train', help='save to project/name') |
| 62 | + parser.add_argument('--name', default='gun', help='save to project/name') |
| 63 | + # parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') |
| 64 | + parser.add_argument('--exist-ok', type=int, default=1, help='existing project/name ok, do not increment') |
| 65 | + opt = parser.parse_args() |
| 66 | + |
| 67 | + # Set DDP variables |
| 68 | + opt.total_batch_size = opt.batch_size |
| 69 | + opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
| 70 | + opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
| 73 | + set_logging(opt.global_rank) |
| 74 | + if opt.global_rank in [-1, 0]: |
| 75 | + check_git_status() |
| 76 | + |
| 77 | + # Resume |
| 78 | + if opt.resume: # resume an interrupted run |
| 79 | + # specified or most recent path |
| 80 | + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()
| 81 | + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
| 84 | + with open(Path(ckpt).parent.parent / 'opt.yaml') as f: |
| 85 | + opt = argparse.Namespace( |
| 86 | + **yaml.load(f, Loader=yaml.FullLoader)) # replace |
| 87 | + opt.cfg, opt.weights, opt.resume = '', ckpt, True |
| 88 | + logger.info('Resuming training from %s' % ckpt) |
| 89 | + else: |
| 90 | + # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') |
| 91 | + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
| 92 | + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
| 95 | + # extend to 2 sizes (train, test) |
| 96 | + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) |
| 97 | + opt.name = 'evolve' if opt.evolve else opt.name |
| 98 | + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
| 100 | + |
| 101 | + # DDP mode |
| 102 | + device = select_device(opt.device, batch_size=opt.batch_size) |
| 103 | + if opt.local_rank != -1: |
| 104 | + assert torch.cuda.device_count() > opt.local_rank |
| 105 | + torch.cuda.set_device(opt.local_rank) |
| 106 | + device = torch.device('cuda', opt.local_rank) |
| 107 | + # distributed backend |
| 108 | + dist.init_process_group(backend='nccl', init_method='env://') |
| 109 | + assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' |
| 110 | + opt.batch_size = opt.total_batch_size // opt.world_size |
| 111 | + |
| 112 | + # Hyperparameters |
| 113 | + with open(opt.hyp) as f: |
| 114 | + hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps |
| 115 | + if 'box' not in hyp: |
| 116 | + warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' % |
| 117 | + (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120')) |
| 118 | + hyp['box'] = hyp.pop('giou') |
| 119 | + |
| 120 | + # Train |
| 121 | + logger.info(opt) |
| 122 | + if not opt.evolve: |
| 123 | + tb_writer = None # init loggers |
| 124 | + if opt.global_rank in [-1, 0]: |
| 125 | + logger.info( |
| 126 | + f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') |
| 127 | + tb_writer = SummaryWriter(opt.save_dir) # Tensorboard |
| 128 | + train(hyp, opt, device, tb_writer, wandb) |
| 129 | + |
| 130 | + # Evolve hyperparameters (optional) |
| 131 | + else: |
| 132 | + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) |
| 133 | + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) |
| 134 | + # final OneCycleLR learning rate (lr0 * lrf) |
| 135 | + 'lrf': (1, 0.01, 1.0), |
| 136 | + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 |
| 137 | + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay |
| 138 | + # warmup epochs (fractions ok) |
| 139 | + 'warmup_epochs': (1, 0.0, 5.0), |
| 140 | + # warmup initial momentum |
| 141 | + 'warmup_momentum': (1, 0.0, 0.95), |
| 142 | + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr |
| 143 | + 'box': (1, 0.02, 0.2), # box loss gain |
| 144 | + 'cls': (1, 0.2, 4.0), # cls loss gain |
| 145 | + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight |
| 146 | + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) |
| 147 | + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight |
| 148 | + 'iou_t': (0, 0.1, 0.7), # IoU training threshold |
| 149 | + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold |
| 150 | + # anchors per output grid (0 to ignore) |
| 151 | + 'anchors': (2, 2.0, 10.0), |
| 152 | + # focal loss gamma (efficientDet default gamma=1.5) |
| 153 | + 'fl_gamma': (0, 0.0, 2.0), |
| 154 | + # image HSV-Hue augmentation (fraction) |
| 155 | + 'hsv_h': (1, 0.0, 0.1), |
| 156 | + # image HSV-Saturation augmentation (fraction) |
| 157 | + 'hsv_s': (1, 0.0, 0.9), |
| 158 | + # image HSV-Value augmentation (fraction) |
| 159 | + 'hsv_v': (1, 0.0, 0.9), |
| 160 | + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) |
| 161 | + # image translation (+/- fraction) |
| 162 | + 'translate': (1, 0.0, 0.9), |
| 163 | + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) |
| 164 | + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) |
| 165 | + # image perspective (+/- fraction), range 0-0.001 |
| 166 | + 'perspective': (0, 0.0, 0.001), |
| 167 | + # image flip up-down (probability) |
| 168 | + 'flipud': (1, 0.0, 1.0), |
| 169 | + # image flip left-right (probability) |
| 170 | + 'fliplr': (0, 0.0, 1.0), |
| 171 | + 'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
| 172 | + 'mixup': (1, 0.0, 1.0)} # image mixup (probability) |
| 173 | + |
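| | + # Each generation below selects a parent from evolve.txt (when it exists),
| | + # mutates it within the limits defined in meta, retrains, and appends the
| | + # outcome to evolve.txt via print_mutation().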
| 174 | + assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' |
| 175 | + opt.notest, opt.nosave = True, True # only test/save final epoch |
| 176 | + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices |
| 177 | + yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
| 179 | + if opt.bucket: |
| 180 | + os.system('gsutil cp gs://%s/evolve.txt .' % |
| 181 | + opt.bucket) # download evolve.txt if exists |
| 182 | + |
| 183 | + for _ in range(300): # generations to evolve |
| 184 | + if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate |
| 185 | + # Select parent(s) |
| 186 | + parent = 'single' # parent selection method: 'single' or 'weighted' |
| 187 | + x = np.loadtxt('evolve.txt', ndmin=2) |
| 188 | + # number of previous results to consider |
| 189 | + n = min(5, len(x)) |
| 190 | + x = x[np.argsort(-fitness(x))][:n] # top n mutations |
| 191 | + w = fitness(x) - fitness(x).min() # weights |
| 192 | + if parent == 'single' or len(x) == 1: |
| 193 | + # x = x[random.randint(0, n - 1)] # random selection |
| 194 | + x = x[random.choices(range(n), weights=w)[0]] # weighted selection
| 196 | + elif parent == 'weighted': |
| 197 | + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
| 199 | + |
| 200 | + # Mutate |
| 201 | + mp, s = 0.8, 0.2 # mutation probability, sigma |
| 202 | + npr = np.random |
| 203 | + npr.seed(int(time.time())) |
| 204 | + g = np.array([x[0] for x in meta.values()]) # gains 0-1 |
| 205 | + ng = len(meta) |
| 206 | + v = np.ones(ng) |
| 207 | + while all(v == 1): # mutate until a change occurs (prevent duplicates) |
| 208 | + v = (g * (npr.random(ng) < mp) * npr.randn(ng) |
| 209 | + * npr.random() * s + 1).clip(0.3, 3.0) |
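| | + # evolve.txt rows store the 7 result values (P, R, mAP@.5, mAP@.5:.95 and
| | + # the three validation losses) followed by the hyperparameters, so the
| | + # hyperparameter columns start at index 7.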
| 210 | + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) |
| 211 | + hyp[k] = float(x[i + 7] * v[i]) # mutate |
| 212 | + |
| 213 | + # Constrain to limits |
| 214 | + for k, v in meta.items(): |
| 215 | + hyp[k] = max(hyp[k], v[1]) # lower limit |
| 216 | + hyp[k] = min(hyp[k], v[2]) # upper limit |
| 217 | + hyp[k] = round(hyp[k], 5) # significant digits |
| 218 | + |
| 219 | + # Train mutation |
| 220 | + results = train(hyp.copy(), opt, device, wandb=wandb) |
| 221 | + |
| 222 | + # Write mutation results |
| 223 | + print_mutation(hyp.copy(), results, yaml_file, opt.bucket) |
| 224 | + |
| 225 | + # Plot results |
| 226 | + plot_evolution(yaml_file) |
| 227 | + print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' |
| 228 | + f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') |
| 229 | + |
| 230 | + def detect(self): |
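| | + """Parse YOLOv5 inference options and run detection on --source; with --update, re-save the listed checkpoints with stripped optimizers."""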
| 231 | + parser = argparse.ArgumentParser() |
| 232 | + parser.add_argument('--weights', nargs='+', type=str, default='activelearning/yolov5s.pt', help='model.pt path(s)') |
| 233 | + parser.add_argument('--source', type=str, default='data/test', help='source') # file/folder, 0 for webcam |
| 234 | + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') |
| 235 | + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') |
| 236 | + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') |
| 237 | + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') |
| 238 | + parser.add_argument('--view-img', action='store_true', help='display results') |
| 239 | + # parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') |
| 240 | + parser.add_argument('--save-txt', type=int, default=1, help='save results to *.txt') |
| 241 | + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') |
| 242 | + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') |
| 243 | + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') |
| 244 | + parser.add_argument('--augment', action='store_true', help='augmented inference') |
| 245 | + parser.add_argument('--update', action='store_true', help='update all models') |
| 246 | + parser.add_argument('--project', default='runs/detect', help='save results to project/name') |
| 247 | + parser.add_argument('--name', default='gun', help='save results to project/name') |
| 248 | + # parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') |
| 249 | + parser.add_argument('--exist-ok', type=int, default=1, help='existing project/name ok, do not increment') |
| 250 | + opt = parser.parse_args() |
| 251 | + print(opt) |
| 252 | + |
| 253 | + with torch.no_grad(): |
| 254 | + if opt.update: # update all models (to fix SourceChangeWarning) |
| 255 | + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: |
| 256 | + detect(opt) |
| 257 | + strip_optimizer(opt.weights) |
| 258 | + else: |
| 259 | + detect(opt) |
| 260 | + |
| 261 | +if __name__ == '__main__': |
| 262 | + model = Yolov5() |
| 263 | + model.train() |
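| | + # model.detect() # optionally run inference once training has finished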