Description: PyTorch script for training on ImageNet
Submitted on December 22, 2018 at 03:37 PM

#in the name of God, The most compassionate the most merciful 
# https://github.com/pytorch/vision/blob/master/torchvision/models/__init__.py
import argparse
import os
import sys
import pdb
import shutil
import time
import random
import datetime
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from utils import convert_secs2time, time_string, time_file_str, AverageMeter
import torch.optim.lr_scheduler as lr_scheduler
# from models import print_log
import models
from tensorboardX import SummaryWriter
from utils import convert_model, measure_model
import torchvision.transforms.functional as TF


model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to directory containing training and validation folders')
parser.add_argument('--train_dir_name', metavar='DIR', default='train', help='training set directory name (default: train)')
parser.add_argument('--val_dir_name', metavar='DIR', default='val', help='validation set directory name (default: val)')
parser.add_argument('--save_dir', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='simpnet_imgnet_drpall',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: simpnet_imgnet_drpall)')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', help='number of data loading workers (default: 16)')
parser.add_argument('--smode', '--stridemode', default=1, type=int, metavar='N', help='stride mode, 1 to 5 (default: 1)')
parser.add_argument('--netidx', '--network-index', default=0, type=int, metavar='N', help='The index used for selecting 5mil or 8mil network (default: 0)')
parser.add_argument('--scale', default=1.0, type=float, metavar='S', help='scale factor for the architecture width; common values: 0.75, 1.0, 1.44 (default: 1.0)')
parser.add_argument('--epochs', default=200, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.045, type=float, metavar='LR', help='initial learning rate') #0.045
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.00004, type=float, metavar='W', help='weight decay (default: 4e-5)')
parser.add_argument('--print-freq', '-p', default=200, type=int, metavar='N', help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--gpus', default=None, help='comma-separated list of GPUs used for training, e.g. 0,1,3')

args = parser.parse_args()
args.prefix = time_file_str()


__imagenet_pca = {
    'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
    'eigvec': torch.Tensor([
        [-0.5675,  0.7192,  0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948,  0.4203],
    ])
}

# Lighting data augmentation, taken from https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py
class Lighting(object):
    """Lighting noise (AlexNet-style, PCA-based noise)."""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        if self.alphastd == 0:
            return img
            
        #img = TF.to_tensor(img)  
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        rgb = self.eigvec.type_as(img).clone()\
            .mul(alpha.view(1, 3).expand(3, 3))\
            .mul(self.eigval.view(1, 3).expand(3, 3))\
            .sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
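
# Illustrative sketch only (not used by the training pipeline, which builds its own transforms in
# main()): Lighting is intended to be composed after ToTensor(), using the ImageNet PCA statistics
# above; alphastd=0.1 follows the referenced preprocess.py, and the Normalize values match main().
example_train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])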

def main():

    best_prec1 = 0
    best_prec5 = 0

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        print('device is ', device)
    else:
        device = 'cpu'

    if not os.path.isdir(args.save_dir):
      os.makedirs(args.save_dir)
    # used for file names, etc 
    time_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')     
    log = open(os.path.join(args.save_dir, '{}_s{}_{}.{}_{}.log'.format(args.arch, args.smode, args.scale, args.prefix, time_stamp)), 'w')

    writer = SummaryWriter('runs/{}_s{}_{}.{}_{}'.format(args.arch, args.smode, args.scale, args.prefix, time_stamp))

    # create model
    print_log("=> creating model '{}_s{}_{}'".format(args.arch, args.smode, args.scale), log)
    
    if 'simpnet_imgnet_drpall' in args.arch:
        model = models.__dict__[args.arch](classes=1000, scale=args.scale, network_idx=args.netidx, mode=args.smode)
    else:
        model = models.__dict__[args.arch]() 

    print_log("=> Model : {}".format(model), log)
    print_log("=> parameter : {}".format(args), log)

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
      model.features = torch.nn.DataParallel(model.features)
      model.cuda()
    else:
      #model = torch.nn.DataParallel(model).cuda()
      model = torch.nn.DataParallel(model, args.gpus).cuda()


    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, alpha=0.9, eps=1.0,
                                    weight_decay=args.weight_decay, momentum=args.momentum, centered=False)

    IMAGE_SIZE=224
    print_log(summary(model, input_size=(3, IMAGE_SIZE, IMAGE_SIZE)), log)

    print_log("=> optimizer : {}'".format(str(optimizer)), log)
    n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    print_log('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6), log)

    milestones = [30, 60, 90, 130, 150]  # only used by the (disabled) step schedule in adjust_learning_rate
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.98, last_epoch=-1)  # stepped every 2 epochs in the training loop
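    # Sketch (comment only, not executed): with ExponentialLR(gamma=0.98) stepped once every
    # 2 epochs in the loop below, the effective learning rate is roughly lr * 0.98 ** (epoch // 2);
    # with the default lr=0.045 that is ~0.045 at epoch 0, ~0.041 at epoch 10 and ~0.016 at epoch 100.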

    # Set to True when resuming from an old snapshot that did not save the scheduler state.
    no_scheduler_stat = False
    if no_scheduler_stat:
        print(scheduler.state_dict())
        tmp = scheduler.state_dict()
        tmp['last_epoch'] = 137
        print(tmp)


   
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print_log("=> loading checkpoint '{}'".format(args.resume), log)
            checkpoint = torch.load(args.resume)
            if args.start_epoch == 0:
                args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            if 'best_prec5' in checkpoint:
                best_prec5 = checkpoint['best_prec5']
            else:
                best_prec5 = 0.00
            model.load_state_dict(checkpoint['state_dict'])
            # load the scheduler state (old snapshots without one fall back to the patched state above)
            if no_scheduler_stat:
                scheduler.load_state_dict(tmp)
            else:
                scheduler.load_state_dict(checkpoint['scheduler'])

            optimizer.load_state_dict(checkpoint['optimizer'])
            model.eval()
            print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
        else:
            print_log("=> no checkpoint found at '{}'".format(args.resume), log)

    cudnn.benchmark = True


    # Data loading code
    traindir = os.path.join(args.data, args.train_dir_name)
    valdir = os.path.join(args.data, args.val_dir_name)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=None)

    input_size = 224
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, log)
        return

    filename = os.path.join(args.save_dir, 'chkpt_{0}_{1}.pth.tar'.format(args.arch, time_stamp))
    bestname = os.path.join(args.save_dir, 'best_{0}_{1}.pth.tar'.format(args.arch, time_stamp))
    bestname_weight = os.path.join(args.save_dir, 'bestweight_{0}_{1}.pth'.format(args.arch, time_stamp))
    
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):

        # for the step-based exponential scheduler:
        # for param_group in optimizer.param_groups:
        #     current_learning_rate = param_group['lr']
        current_learning_rate = float(scheduler.get_lr()[-1])
        # exponential decay: step the scheduler once every 2 epochs
        if epoch % 2 == 0 and epoch > 0:
            scheduler.step()

        ###current_learning_rate = adjust_learning_rate(optimizer, epoch)

        need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs-epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
        #print_log(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s}'.format(args.arch, epoch, args.epochs, time_string(), need_time), log)

        print_log('\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:.6f}]'.format(time_string(), epoch, args.epochs, need_time, current_learning_rate) \
                + ' [Best : Accuracy(T1/T5)={:.2f}/{:.2f}, Error={:.2f}/{:.2f}]'.format(best_prec1, best_prec5, 100-best_prec1,100-best_prec5), log)

        # train for one epoch
        tr_prec1, tr_prec5, tr_loss = train(train_loader, model, criterion, optimizer, epoch, log)

        # evaluate on validation set
        prec1,prec5, val_loss = validate(val_loader, model, criterion, log)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        best_prec5 = max(prec5, best_prec5)
        
        
        writer.add_scalar('learning_rate', current_learning_rate, epoch)
        writer.add_scalar('training/loss', tr_loss, epoch)
        writer.add_scalar('training/Top1', tr_prec1, epoch)
        writer.add_scalar('training/Top5', tr_prec5, epoch)

        writer.add_scalar('validation/loss', val_loss, epoch)
        writer.add_scalar('validation/Top1', prec1, epoch)
        writer.add_scalar('validation/Top5', prec5, epoch)

        if epoch % 100 == 0 and epoch > 0:  # keep a standalone full snapshot every 100 epochs
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'best_prec5': best_prec5,
                'optimizer' : optimizer.state_dict(),
                'scheduler' : scheduler.state_dict()
            }, False, os.path.join(args.save_dir,'chkpt_{0}_epc{1}_{2}.pth.tar'.format(args.arch, epoch, time_stamp)), 'bst_void')

        # save only the weights of the best model so far
        if is_best:
            save_checkpoint({
                'state_dict': model.state_dict()
            }, True, filename, bestname_weight)
         
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'best_prec5': best_prec5,
            'optimizer' : optimizer.state_dict(),
            'scheduler' : scheduler.state_dict()
        }, is_best, filename, bestname)


        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    writer.close()       
    log.close()


def train(train_loader, model, criterion, optimizer, epoch, log):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        
        #for step based exp scheduler
        #learning_rate = exp_lr_scheduler(optimizer,iter=i,lr_decay_iter=6400)

        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))

        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))


        # compute gradient and do an optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print_log('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5), log)
    return top1.avg, top5.avg, losses.avg


def validate(val_loader, model, criterion, log):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print_log('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5), log)

        print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss@ {error:.3f}'.format(top1=top1, top5=top5, error=losses.avg), log)

    return top1.avg, top5.avg, losses.avg


def save_checkpoint(state, is_best, filename, bestname):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, bestname)


class AverageMeter(object):
    """Computes and stores the average and current value (overrides the AverageMeter imported from utils)"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 following the milestone pattern
    30 60 90 130 150 (currently unused; the ExponentialLR scheduler is used instead).
    """
    if(epoch <=90):
        lr = args.lr * (0.1 ** (epoch // 30))
    elif(epoch>90 and epoch<=130):
        lr = args.lr * (0.1 ** (epoch // 50))
    else:
        lr = args.lr * (0.1 ** (epoch // 20))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def print_log(print_string, log):
  print("{}".format(print_string))
  log.write('{}\n'.format(print_string))
  log.flush()

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
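
# Illustrative usage (hypothetical tensors, not part of the training loop): given logits of shape
# [batch, num_classes] and integer class labels of shape [batch],
#   top1, top5 = accuracy(logits, labels, topk=(1, 5))
# returns two single-element tensors holding the batch top-1 and top-5 accuracy in percent.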

from torch.autograd import Variable
from collections import OrderedDict

def summary(model, input_size):
        def register_hook(module):
            def hook(module, input, output):
                class_name = str(module.__class__).split('.')[-1].split("'")[0]
                module_idx = len(summary)

                m_key = '%s-%i' % (class_name, module_idx+1)
                summary[m_key] = OrderedDict()
                summary[m_key]['input_shape'] = list(input[0].size())
                summary[m_key]['input_shape'][0] = -1
                if isinstance(output, (list,tuple)):
                    summary[m_key]['output_shape'] = [[-1] + list(o.size())[1:] for o in output]
                else:
                    summary[m_key]['output_shape'] = list(output.size())
                    summary[m_key]['output_shape'][0] = -1

                params = 0
                if hasattr(module, 'weight') and hasattr(module.weight, 'size'):
                    params += torch.prod(torch.LongTensor(list(module.weight.size())))
                    summary[m_key]['trainable'] = module.weight.requires_grad
                if hasattr(module, 'bias') and hasattr(module.bias, 'size'):
                    params +=  torch.prod(torch.LongTensor(list(module.bias.size())))
                summary[m_key]['nb_params'] = params
                
            if (not isinstance(module, nn.Sequential) and 
               not isinstance(module, nn.ModuleList) and 
               not (module == model)):
                hooks.append(module.register_forward_hook(hook))
                
        if torch.cuda.is_available():
            dtype = torch.cuda.FloatTensor
        else:
            dtype = torch.FloatTensor
        
        # check if there are multiple inputs to the network
        if isinstance(input_size[0], (list, tuple)):
            x = [Variable(torch.rand(2,*in_size)).type(dtype) for in_size in input_size]
        else:
            x = Variable(torch.rand(2,*input_size)).type(dtype)
            
            
        # create properties
        summary = OrderedDict()
        hooks = []
        # register hook
        model.apply(register_hook)
        # make a forward pass
        model(x)
        # remove these hooks
        for h in hooks:
            h.remove()

        inf_str_summery = '-' * 64 
        #print('----------------------------------------------------------------')
        line_new = '{:>20}  {:>25} {:>15}'.format('Layer (type)', 'Output Shape', 'Param #')
        #print(line_new)
        inf_str_summery += '\n{0}'.format(line_new)
        inf_str_summery += '\n{0}'.format('='*64)
        #print('================================================================')
        total_params = 0
        trainable_params = 0
        for layer in summary:
            # input_shape, output_shape, trainable, nb_params
            line_new = '{:>20}  {:>25} {:>15}'.format(layer, str(summary[layer]['output_shape']), '{0:,}'.format(summary[layer]['nb_params']))
            total_params += summary[layer]['nb_params']
            if 'trainable' in summary[layer]:
                if summary[layer]['trainable'] == True:
                    trainable_params += summary[layer]['nb_params']
            #print(line_new)
            inf_str_summery += '\n{0}'.format(line_new)
        inf_str_summery += '\n{0}'.format('=' * 64)
        inf_str_summery += '\nTotal params: {0:,}'.format(total_params)
        inf_str_summery += '\nTrainable params: {0:,}'.format(trainable_params)
        inf_str_summery += '\nNon-trainable params: {0:,}'.format(total_params - trainable_params)
        inf_str_summery += '\n{0}'.format('-' * 64)    
        # print('================================================================')
        # print('Total params: {0:,}'.format(total_params))
        # print('Trainable params: {0:,}'.format(trainable_params))
        # print('Non-trainable params: {0:,}'.format(total_params - trainable_params))
        # print('----------------------------------------------------------------')
        print(inf_str_summery)
        return inf_str_summery

if __name__ == '__main__':
    main()