diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a9f70f1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,138 @@
+output_dir/
+outputs/
+selected/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+**/*.pyc
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# custom
+/data
+.vscode
+.idea
+*.pkl
+*.pkl.json
+*.log.json
+benchlist.txt
+work_dirs/
+
+# Pytorch
+*.pth
+
+# Profile
+*.prof
+
+# lmdb
+*.mdb
+
+# unignore some data file in tests/data
+!tests/data/**/*.pkl
+!tests/data/**/*.pkl.json
+!tests/data/**/*.log.json
+!tests/data/**/*.pth
+
+# avoid soft links created by MIM
+mmaction/configs/*
+mmaction/tools/*
diff --git a/README.md b/README.md
index ce47ebb..5e540bb 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,99 @@
# MaskAlign
-> This is the official repository for paper "Stare at What You See: Masked Image Modeling without Reconstruction".
-Stay tuned for new details!
+
+![MaskAlign framework](figs/framework.png)
+
+This is the official PyTorch repository for the paper [Stare at What You See: Masked Image Modeling without Reconstruction](https://arxiv.org/abs/2211.08887):
+```
+@article{xue2022stare,
+ title={Stare at What You See: Masked Image Modeling without Reconstruction},
+ author={Xue, Hongwei and Gao, Peng and Li, Hongyang and Qiao, Yu and Sun, Hao and Li, Houqiang and Luo, Jiebo},
+ journal={arXiv preprint arXiv:2211.08887},
+ year={2022}
+}
+```
+
+* This repo is a modification of the [MAE repo](https://github.com/facebookresearch/mae). Installation and preparation follow that repo.
+
+* The teacher models in this repo are loaded from [Huggingface](https://huggingface.co/). Please install the transformers package by running: `pip install transformers`.
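+
+  A minimal sketch of how a teacher is instantiated (this mirrors `models_pretrain.py`, which calls `CLIPVisionModel.from_pretrained` and freezes all teacher parameters):
+
+  ```
+  from transformers import CLIPVisionModel
+
+  teacher = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch16")
+  for p in teacher.parameters():
+      p.requires_grad = False  # the teacher stays frozen during pre-training
+  ```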
+
+## Pre-training
+
+To pre-train ViT-Base (recommended default) with **distributed training**, run the following on 8 GPUs:
+
+```
+python -m torch.distributed.launch --nproc_per_node=8 main_pretrain.py \
+ --batch_size 128 \
+ --model mae_vit_base_patch16 \
+ --blr 1.5e-4 \
+ --min_lr 1e-5 \
+ --data_path ${IMAGENET_DIR} \
+ --output_dir ${OUTPUT_DIR} \
+ --target_norm whiten \
+ --loss_type smoothl1 \
+ --drop_path 0.1 \
+ --head_type linear \
+ --epochs 200 \
+ --warmup_epochs 20 \
+ --mask_type attention \
+ --mask_ratio 0.7 \
+ --loss_weights top5 \
+ --fusion_type linear \
+ --teacher_model openai/clip-vit-base-patch16
+```
+
+- Here the effective batch size is 128 (`batch_size` per GPU) * 8 (GPUs) = 1024. If memory or the number of GPUs is limited, use `--accum_iter` to maintain the effective batch size, which is `batch_size` (per GPU) * number of GPUs * `--accum_iter`; e.g., with 4 GPUs, `--batch_size 128 --accum_iter 2` keeps it at 1024.
+- `blr` is the base learning rate. The actual `lr` is computed by the [linear scaling rule](https://arxiv.org/abs/1706.02677): `lr` = `blr` * effective batch size / 256.
+- This repo will automatically resume the checkpoints by keeping a "latest checkpoint".
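+- For the command above, the linear scaling rule gives `lr` = 1.5e-4 * (128 * 8) / 256 = 6e-4. The same rule applies to the fine-tuning and linear-probing commands below.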
+
+To pre-train ViT-Large, please set `--model mae_vit_large_patch16` and `--drop_path 0.2`. Currently, this repo supports three teacher models via `--teacher_model ${TEACHER}`, where `${TEACHER}` is one of `openai/clip-vit-base-patch16`, `openai/clip-vit-large-patch14`, and `facebook/dino-vitb16`.
+
+## Fine-tuning
+
+Get our pre-trained checkpoints from [here](TODO).
+
+To fine-tune ViT-Base (recommended default) with **distributed training**, run the following on 8 GPUs:
+```
+python -m torch.distributed.launch --nproc_per_node=8 main_finetune.py \
+ --epochs 100 \
+ --batch_size 128 \
+ --model vit_base_patch16 \
+ --blr 3e-4 \
+ --layer_decay 0.55 \
+ --weight_decay 0.05 \
+ --drop_path 0.2 \
+ --reprob 0.25 \
+ --mixup 0.8 \
+ --cutmix 1.0 \
+ --dist_eval \
+ --finetune ${PT_CHECKPOINT} \
+ --data_path ${IMAGENET_DIR} \
+ --output_dir ${OUTPUT_DIR}
+```
+
+- Here the effective batch size is 128 (`batch_size` per GPU) * 8 (GPUs) = 1024.
+- `blr` is the base learning rate. The actual `lr` is computed by the [linear scaling rule](https://arxiv.org/abs/1706.02677): `lr` = `blr` * effective batch size / 256.
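+- For the command above, the rule gives `lr` = 3e-4 * 1024 / 256 = 1.2e-3.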
+
+To fine-tune ViT-Large, please set `--model vit_large_patch16 --epochs 50 --drop_path 0.4 --layer_decay 0.75 --blr 3e-4`.
+
+
+## Linear Probing
+
+Run the following on 8 GPUs:
+```
+python -m torch.distributed.launch --nproc_per_node=8 main_linprobe.py \
+ --epochs 90 \
+ --batch_size 2048 \
+ --model vit_base_patch16 \
+ --blr 0.025 \
+ --weight_decay 0.0 \
+ --dist_eval \
+ --finetune ${PT_CHECKPOINT} \
+ --data_path ${IMAGENET_DIR} \
+ --output_dir ${OUTPUT_DIR}
+```
+- Here the effective batch size is 2048 (`batch_size` per GPU) * 8 (GPUs) = 16384.
+- `blr` is the base learning rate. The actual `lr` is computed by the [linear scaling rule](https://arxiv.org/abs/1706.02677): `lr` = `blr` * effective batch size / 256.
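+- For the command above, the rule gives `lr` = 0.025 * 16384 / 256 = 1.6.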
+
diff --git a/engine_finetune.py b/engine_finetune.py
new file mode 100644
index 0000000..cfa9bd5
--- /dev/null
+++ b/engine_finetune.py
@@ -0,0 +1,130 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+
+import math
+import sys
+from typing import Iterable, Optional
+
+import torch
+
+from timm.data import Mixup
+from timm.utils import accuracy
+
+import util.misc as misc
+import util.lr_sched as lr_sched
+
+
+def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
+ data_loader: Iterable, optimizer: torch.optim.Optimizer,
+ device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
+ mixup_fn: Optional[Mixup] = None, log_writer=None,
+ args=None):
+ model.train(True)
+ metric_logger = misc.MetricLogger(delimiter=" ")
+ metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
+ header = 'Epoch: [{}]'.format(epoch)
+ print_freq = 20
+
+ accum_iter = args.accum_iter
+
+ optimizer.zero_grad()
+
+ if log_writer is not None:
+ print('log_dir: {}'.format(log_writer.log_dir))
+
+ for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
+
+ # we use a per iteration (instead of per epoch) lr scheduler
+ if data_iter_step % accum_iter == 0:
+ lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
+
+ samples = samples.to(device, non_blocking=True)
+ targets = targets.to(device, non_blocking=True)
+
+ if mixup_fn is not None:
+ samples, targets = mixup_fn(samples, targets)
+
+ with torch.cuda.amp.autocast():
+ outputs = model(samples)
+ loss = criterion(outputs, targets)
+
+ loss_value = loss.item()
+
+ if not math.isfinite(loss_value):
+ print("Loss is {}, stopping training".format(loss_value))
+ sys.exit(1)
+
+ loss /= accum_iter
+ loss_scaler(loss, optimizer, clip_grad=max_norm,
+ parameters=model.parameters(), create_graph=False,
+ update_grad=(data_iter_step + 1) % accum_iter == 0)
+ if (data_iter_step + 1) % accum_iter == 0:
+ optimizer.zero_grad()
+
+ # torch.cuda.synchronize()
+
+ metric_logger.update(loss=loss_value)
+ min_lr = 10.
+ max_lr = 0.
+ for group in optimizer.param_groups:
+ min_lr = min(min_lr, group["lr"])
+ max_lr = max(max_lr, group["lr"])
+
+ metric_logger.update(lr=max_lr)
+
+ loss_value_reduce = misc.all_reduce_mean(loss_value)
+ if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
+ """ We use epoch_1000x as the x-axis in tensorboard.
+ This calibrates different curves when batch size changes.
+ """
+ epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
+ log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
+ log_writer.add_scalar('lr', max_lr, epoch_1000x)
+
+ # gather the stats from all processes
+ metric_logger.synchronize_between_processes()
+ print("Averaged stats:", metric_logger)
+ return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
+
+
+@torch.no_grad()
+def evaluate(data_loader, model, device):
+ criterion = torch.nn.CrossEntropyLoss()
+
+ metric_logger = misc.MetricLogger(delimiter=" ")
+ header = 'Test:'
+
+ # switch to evaluation mode
+ model.eval()
+
+ for batch in metric_logger.log_every(data_loader, 10, header):
+ images = batch[0]
+ target = batch[-1]
+ images = images.to(device, non_blocking=True)
+ target = target.to(device, non_blocking=True)
+
+ # compute output
+ with torch.cuda.amp.autocast():
+ output = model(images)
+ loss = criterion(output, target)
+
+ acc1, acc5 = accuracy(output, target, topk=(1, 5))
+
+ batch_size = images.shape[0]
+ metric_logger.update(loss=loss.item())
+ metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
+ metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
+ # gather the stats from all processes
+ metric_logger.synchronize_between_processes()
+ print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
+ .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
+
+ return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
diff --git a/engine_pretrain.py b/engine_pretrain.py
new file mode 100644
index 0000000..0a566aa
--- /dev/null
+++ b/engine_pretrain.py
@@ -0,0 +1,95 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+import math
+import sys
+from typing import Iterable
+
+import torch
+
+import util.misc as misc
+import util.lr_sched as lr_sched
+
+
+def train_one_epoch(model: torch.nn.Module,
+ data_loader: Iterable, optimizer: torch.optim.Optimizer,
+ device: torch.device, epoch: int, loss_scaler,
+ log_writer=None,
+ args=None):
+ model.train(True)
+ metric_logger = misc.MetricLogger(delimiter=" ")
+ metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
+ header = 'Epoch: [{}]'.format(epoch)
+ print_freq = 20
+
+ accum_iter = args.accum_iter
+
+ optimizer.zero_grad()
+
+ if log_writer is not None:
+ print('log_dir: {}'.format(log_writer.log_dir))
+
+ for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
+
+ # we use a per iteration (instead of per epoch) lr scheduler
+ if data_iter_step % accum_iter == 0:
+ lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
+
+ samples = samples.to(device, non_blocking=True)
+
+ with torch.cuda.amp.autocast():
+ loss = model(samples, mask_ratio=args.mask_ratio)
+
+        # the model may return a list of loss terms; log each and optimize their sum
+ if isinstance(loss, list):
+ loss_list = [i.item() for i in loss]
+ loss = sum(loss)
+ else:
+ loss_list = None
+
+ loss_value = loss.item()
+
+ if not math.isfinite(loss_value):
+ print("Loss is {}, stopping training".format(loss_value))
+ sys.exit(1)
+
+ loss /= accum_iter
+ loss_scaler(loss, optimizer, parameters=model.parameters(),
+ update_grad=(data_iter_step + 1) % accum_iter == 0)
+ if (data_iter_step + 1) % accum_iter == 0:
+ optimizer.zero_grad()
+
+ # torch.cuda.synchronize()
+
+ metric_logger.update(loss=loss_value)
+
+        # log the two loss terms individually
+ if loss_list is not None:
+ assert len(loss_list) == 2
+ metric_logger.update(loss1=loss_list[0])
+ metric_logger.update(loss2=loss_list[1])
+
+ lr = optimizer.param_groups[0]["lr"]
+ metric_logger.update(lr=lr)
+
+ loss_value_reduce = misc.all_reduce_mean(loss_value)
+ if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
+ """ We use epoch_1000x as the x-axis in tensorboard.
+ This calibrates different curves when batch size changes.
+ """
+ epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
+ log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
+ log_writer.add_scalar('lr', lr, epoch_1000x)
+
+
+ # gather the stats from all processes
+ metric_logger.synchronize_between_processes()
+ print("Averaged stats:", metric_logger)
+ return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
\ No newline at end of file
diff --git a/figs/framework.png b/figs/framework.png
new file mode 100644
index 0000000..821dabd
Binary files /dev/null and b/figs/framework.png differ
diff --git a/main_finetune.py b/main_finetune.py
new file mode 100644
index 0000000..90d2c0d
--- /dev/null
+++ b/main_finetune.py
@@ -0,0 +1,363 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+
+import argparse
+import datetime
+import json
+import numpy as np
+import os
+import time
+from pathlib import Path
+
+import torch
+import torch.backends.cudnn as cudnn
+from torch.utils.tensorboard import SummaryWriter
+
+import timm
+
+assert timm.__version__ == "0.3.2" # version check
+from timm.models.layers import trunc_normal_
+from timm.data.mixup import Mixup
+from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
+
+import util.lr_decay as lrd
+import util.misc as misc
+from util.datasets import build_dataset_jpg
+from util.pos_embed import interpolate_pos_embed
+from util.misc import NativeScalerWithGradNormCount as NativeScaler
+
+import models_vit
+
+from engine_finetune import train_one_epoch, evaluate
+
+
+def get_args_parser():
+ parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
+ parser.add_argument('--batch_size', default=64, type=int,
+                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
+ parser.add_argument('--epochs', default=50, type=int)
+ parser.add_argument('--accum_iter', default=1, type=int,
+ help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
+
+ # Model parameters
+ parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
+ help='Name of model to train')
+
+ parser.add_argument('--input_size', default=224, type=int,
+ help='images input size')
+
+ parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
+ help='Drop path rate (default: 0.1)')
+
+ # Optimizer parameters
+ parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
+ help='Clip gradient norm (default: None, no clipping)')
+ parser.add_argument('--weight_decay', type=float, default=0.05,
+ help='weight decay (default: 0.05)')
+
+ parser.add_argument('--lr', type=float, default=None, metavar='LR',
+ help='learning rate (absolute lr)')
+ parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
+ help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
+ parser.add_argument('--layer_decay', type=float, default=0.75,
+ help='layer-wise lr decay from ELECTRA/BEiT')
+
+ parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
+ help='lower lr bound for cyclic schedulers that hit 0')
+
+ parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
+ help='epochs to warmup LR')
+
+ # Augmentation parameters
+ parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
+ help='Color jitter factor (enabled only when not using Auto/RandAug)')
+ parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
+                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
+ parser.add_argument('--smoothing', type=float, default=0.1,
+ help='Label smoothing (default: 0.1)')
+
+ # * Random Erase params
+ parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
+ help='Random erase prob (default: 0.25)')
+ parser.add_argument('--remode', type=str, default='pixel',
+ help='Random erase mode (default: "pixel")')
+ parser.add_argument('--recount', type=int, default=1,
+ help='Random erase count (default: 1)')
+ parser.add_argument('--resplit', action='store_true', default=False,
+ help='Do not random erase first (clean) augmentation split')
+
+ # * Mixup params
+ parser.add_argument('--mixup', type=float, default=0,
+ help='mixup alpha, mixup enabled if > 0.')
+ parser.add_argument('--cutmix', type=float, default=0,
+ help='cutmix alpha, cutmix enabled if > 0.')
+ parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
+ help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
+ parser.add_argument('--mixup_prob', type=float, default=1.0,
+ help='Probability of performing mixup or cutmix when either/both is enabled')
+ parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
+ help='Probability of switching to cutmix when both mixup and cutmix enabled')
+ parser.add_argument('--mixup_mode', type=str, default='batch',
+ help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
+
+ # * Finetuning params
+ parser.add_argument('--finetune', default='',
+ help='finetune from checkpoint')
+ parser.add_argument('--global_pool', action='store_true')
+ parser.set_defaults(global_pool=True)
+ parser.add_argument('--cls_token', action='store_false', dest='global_pool',
+ help='Use class token instead of global pool for classification')
+
+ # Dataset parameters
+ parser.add_argument('--data_path', default='/mnt/petrelfs/share/imagenet/images', type=str,
+ help='dataset path')
+ parser.add_argument('--nb_classes', default=1000, type=int,
+ help='number of the classification types')
+
+ parser.add_argument('--output_dir', default='',
+ help='path where to save, empty for no saving')
+ parser.add_argument('--log_dir', default='',
+ help='path where to tensorboard log')
+ parser.add_argument('--device', default='cuda',
+ help='device to use for training / testing')
+ parser.add_argument('--seed', default=0, type=int)
+ parser.add_argument('--resume', default='',
+ help='resume from checkpoint')
+ parser.add_argument('--auto_resume', action='store_true')
+ parser.set_defaults(auto_resume=True)
+
+ parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
+ help='start epoch')
+ parser.add_argument('--eval', action='store_true',
+ help='Perform evaluation only')
+ parser.add_argument('--dist_eval', action='store_true', default=False,
+                        help='Enabling distributed evaluation (recommended during training for faster monitoring)')
+ parser.add_argument('--num_workers', default=10, type=int)
+ parser.add_argument('--pin_mem', action='store_true',
+ help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
+ parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
+ parser.set_defaults(pin_mem=True)
+
+ # distributed training parameters
+ parser.add_argument('--world_size', default=1, type=int,
+ help='number of distributed processes')
+ parser.add_argument('--local_rank', default=-1, type=int)
+ parser.add_argument('--dist_on_itp', action='store_true')
+ parser.add_argument('--dist_url', default='env://',
+ help='url used to set up distributed training')
+
+ return parser
+
+
+def main(args):
+ misc.init_distributed_mode(args)
+
+ print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
+ print("{}".format(args).replace(', ', ',\n'))
+
+ device = torch.device(args.device)
+
+ # fix the seed for reproducibility
+ seed = args.seed + misc.get_rank()
+ torch.manual_seed(seed)
+ np.random.seed(seed)
+
+ cudnn.benchmark = True
+
+ dataset_train = build_dataset_jpg(is_train=True, args=args)
+ dataset_val = build_dataset_jpg(is_train=False, args=args)
+
+ if True: # args.distributed:
+ num_tasks = misc.get_world_size()
+ global_rank = misc.get_rank()
+ sampler_train = torch.utils.data.DistributedSampler(
+ dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
+ )
+ print("Sampler_train = %s" % str(sampler_train))
+ if args.dist_eval:
+ if len(dataset_val) % num_tasks != 0:
+ print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
+ 'This will slightly alter validation results as extra duplicate entries are added to achieve '
+ 'equal num of samples per-process.')
+ sampler_val = torch.utils.data.DistributedSampler(
+ dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
+ else:
+ sampler_val = torch.utils.data.SequentialSampler(dataset_val)
+ else:
+ sampler_train = torch.utils.data.RandomSampler(dataset_train)
+ sampler_val = torch.utils.data.SequentialSampler(dataset_val)
+
+ if global_rank == 0 and args.log_dir is not None and len(args.log_dir) > 0 and not args.eval:
+ os.makedirs(args.log_dir, exist_ok=True)
+ log_writer = SummaryWriter(log_dir=args.log_dir)
+ else:
+ log_writer = None
+
+ data_loader_train = torch.utils.data.DataLoader(
+ dataset_train, sampler=sampler_train,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ pin_memory=args.pin_mem,
+ drop_last=True,
+ )
+
+ data_loader_val = torch.utils.data.DataLoader(
+ dataset_val, sampler=sampler_val,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ pin_memory=args.pin_mem,
+ drop_last=False
+ )
+
+ mixup_fn = None
+ mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
+ if mixup_active:
+ print("Mixup is activated!")
+ mixup_fn = Mixup(
+ mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
+ prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
+ label_smoothing=args.smoothing, num_classes=args.nb_classes)
+
+ model = models_vit.__dict__[args.model](
+ num_classes=args.nb_classes,
+ drop_path_rate=args.drop_path,
+ global_pool=args.global_pool,
+ )
+
+ if args.finetune and not args.eval:
+ checkpoint = torch.load(args.finetune, map_location='cpu')
+
+ print("Load pre-trained checkpoint from: %s" % args.finetune)
+ checkpoint_model = checkpoint['model']
+ state_dict = model.state_dict()
+ for k in ['head.weight', 'head.bias']:
+ if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
+ print(f"Removing key {k} from pretrained checkpoint")
+ del checkpoint_model[k]
+
+ # interpolate position embedding
+ interpolate_pos_embed(model, checkpoint_model)
+
+ # load pre-trained model
+ msg = model.load_state_dict(checkpoint_model, strict=False)
+ print(msg)
+
+ if args.global_pool:
+ assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
+ else:
+ assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
+
+ # manually initialize fc layer
+ trunc_normal_(model.head.weight, std=2e-5)
+
+ model.to(device)
+
+ model_without_ddp = model
+ n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+ print("Model = %s" % str(model_without_ddp))
+ print('number of params (M): %.2f' % (n_parameters / 1.e6))
+
+ eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
+
+ if args.lr is None: # only base_lr is specified
+ args.lr = args.blr * eff_batch_size / 256
+
+ print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
+ print("actual lr: %.2e" % args.lr)
+
+ print("accumulate grad iterations: %d" % args.accum_iter)
+ print("effective batch size: %d" % eff_batch_size)
+
+ if args.distributed:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
+ model_without_ddp = model.module
+
+ # build optimizer with layer-wise lr decay (lrd)
+ param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
+ no_weight_decay_list=model_without_ddp.no_weight_decay(),
+ layer_decay=args.layer_decay
+ )
+ optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
+ loss_scaler = NativeScaler()
+
+ if mixup_fn is not None:
+ # smoothing is handled with mixup label transform
+ criterion = SoftTargetCrossEntropy()
+ elif args.smoothing > 0.:
+ criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
+ else:
+ criterion = torch.nn.CrossEntropyLoss()
+
+ print("criterion = %s" % str(criterion))
+
+ misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
+
+ if args.eval:
+ test_stats = evaluate(data_loader_val, model, device)
+ print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
+ exit(0)
+
+ print(f"Start training for {args.epochs} epochs")
+ start_time = time.time()
+ max_accuracy = 0.0
+ for epoch in range(args.start_epoch, args.epochs):
+ if args.distributed:
+ data_loader_train.sampler.set_epoch(epoch)
+ train_stats = train_one_epoch(
+ model, criterion, data_loader_train,
+ optimizer, device, epoch, loss_scaler,
+ args.clip_grad, mixup_fn,
+ log_writer=log_writer,
+ args=args
+ )
+ if args.output_dir:
+ misc.save_model_latest(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ if args.output_dir and (epoch % 99 == 0 or epoch + 1 == args.epochs):
+ misc.save_model(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ test_stats = evaluate(data_loader_val, model, device)
+ print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
+ max_accuracy = max(max_accuracy, test_stats["acc1"])
+ print(f'Max accuracy: {max_accuracy:.2f}%')
+
+ if log_writer is not None:
+ log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
+ log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
+ log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
+
+ log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
+ **{f'test_{k}': v for k, v in test_stats.items()},
+ 'epoch': epoch,
+ 'n_parameters': n_parameters}
+
+ if args.output_dir and misc.is_main_process():
+ if log_writer is not None:
+ log_writer.flush()
+ with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
+ f.write(json.dumps(log_stats) + "\n")
+
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+ print('Training time {}'.format(total_time_str))
+
+
+if __name__ == '__main__':
+ args = get_args_parser()
+ args = args.parse_args()
+ if args.output_dir:
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
+ main(args)
diff --git a/main_linprobe.py b/main_linprobe.py
new file mode 100644
index 0000000..35734cd
--- /dev/null
+++ b/main_linprobe.py
@@ -0,0 +1,326 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# MoCo v3: https://github.com/facebookresearch/moco-v3
+# --------------------------------------------------------
+
+import argparse
+import datetime
+import json
+import numpy as np
+import os
+import time
+from pathlib import Path
+
+import torch
+import torch.backends.cudnn as cudnn
+from torch.utils.tensorboard import SummaryWriter
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+
+import timm
+
+assert timm.__version__ == "0.3.2" # version check
+from timm.models.layers import trunc_normal_
+
+import util.misc as misc
+from util.pos_embed import interpolate_pos_embed
+from util.misc import NativeScalerWithGradNormCount as NativeScaler
+from util.lars import LARS
+from util.crop import RandomResizedCrop
+
+import models_vit
+
+from util.datasets import ImageNet1k_JPG
+from engine_finetune import train_one_epoch, evaluate
+
+
+def get_args_parser():
+ parser = argparse.ArgumentParser('MAE linear probing for image classification', add_help=False)
+ parser.add_argument('--batch_size', default=512, type=int,
+                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
+ parser.add_argument('--epochs', default=90, type=int)
+ parser.add_argument('--accum_iter', default=1, type=int,
+ help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
+
+ # Model parameters
+ parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
+ help='Name of model to train')
+
+ # Optimizer parameters
+ parser.add_argument('--weight_decay', type=float, default=0,
+ help='weight decay (default: 0 for linear probe following MoCo v1)')
+
+ parser.add_argument('--lr', type=float, default=None, metavar='LR',
+ help='learning rate (absolute lr)')
+ parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
+ help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
+
+ parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
+ help='lower lr bound for cyclic schedulers that hit 0')
+
+ parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
+ help='epochs to warmup LR')
+
+ # * Finetuning params
+ parser.add_argument('--finetune', default='',
+ help='finetune from checkpoint')
+ parser.add_argument('--global_pool', action='store_true')
+ parser.set_defaults(global_pool=False)
+ parser.add_argument('--cls_token', action='store_false', dest='global_pool',
+ help='Use class token instead of global pool for classification')
+
+ # Dataset parameters
+ parser.add_argument('--data_path', default='/mnt/petrelfs/share/imagenet/images', type=str,
+ help='dataset path')
+ parser.add_argument('--nb_classes', default=1000, type=int,
+ help='number of the classification types')
+
+ parser.add_argument('--output_dir', default='',
+ help='path where to save, empty for no saving')
+ parser.add_argument('--log_dir', default='',
+ help='path where to tensorboard log')
+ parser.add_argument('--device', default='cuda',
+ help='device to use for training / testing')
+ parser.add_argument('--seed', default=0, type=int)
+ parser.add_argument('--resume', default='',
+ help='resume from checkpoint')
+ parser.add_argument('--auto_resume', action='store_true')
+ parser.set_defaults(auto_resume=True)
+
+ parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
+ help='start epoch')
+ parser.add_argument('--eval', action='store_true',
+ help='Perform evaluation only')
+ parser.add_argument('--dist_eval', action='store_true', default=False,
+                        help='Enabling distributed evaluation (recommended during training for faster monitoring)')
+ parser.add_argument('--num_workers', default=4, type=int)
+ parser.add_argument('--pin_mem', action='store_true',
+ help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
+ parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
+ parser.set_defaults(pin_mem=True)
+
+ # distributed training parameters
+ parser.add_argument('--world_size', default=1, type=int,
+ help='number of distributed processes')
+ parser.add_argument('--local_rank', default=-1, type=int)
+ parser.add_argument('--dist_on_itp', action='store_true')
+ parser.add_argument('--dist_url', default='env://',
+ help='url used to set up distributed training')
+
+ return parser
+
+
+def main(args):
+ misc.init_distributed_mode(args)
+
+ print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
+ print("{}".format(args).replace(', ', ',\n'))
+
+ device = torch.device(args.device)
+
+ # fix the seed for reproducibility
+ seed = args.seed + misc.get_rank()
+ torch.manual_seed(seed)
+ np.random.seed(seed)
+
+ cudnn.benchmark = True
+
+ # linear probe: weak augmentation
+ transform_train = transforms.Compose([
+ RandomResizedCrop(224, interpolation=3),
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+ transform_val = transforms.Compose([
+ transforms.Resize(256, interpolation=3),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+ # dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
+ # dataset_val = datasets.ImageFolder(os.path.join(args.data_path, 'val'), transform=transform_val)
+ dataset_train = ImageNet1k_JPG(image_root=os.path.join(args.data_path, 'train'), meta_path=os.path.join(args.data_path, 'meta', 'train.txt'), transform=transform_train)
+ dataset_val = ImageNet1k_JPG(image_root=os.path.join(args.data_path, 'val'), meta_path=os.path.join(args.data_path, 'meta', 'val.txt'), transform=transform_val)
+ print(dataset_train)
+ print(dataset_val)
+
+ if True: # args.distributed:
+ num_tasks = misc.get_world_size()
+ global_rank = misc.get_rank()
+ sampler_train = torch.utils.data.DistributedSampler(
+ dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
+ )
+ print("Sampler_train = %s" % str(sampler_train))
+ if args.dist_eval:
+ if len(dataset_val) % num_tasks != 0:
+ print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
+ 'This will slightly alter validation results as extra duplicate entries are added to achieve '
+ 'equal num of samples per-process.')
+ sampler_val = torch.utils.data.DistributedSampler(
+ dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
+ else:
+ sampler_val = torch.utils.data.SequentialSampler(dataset_val)
+ else:
+ sampler_train = torch.utils.data.RandomSampler(dataset_train)
+ sampler_val = torch.utils.data.SequentialSampler(dataset_val)
+
+ if global_rank == 0 and args.log_dir is not None and len(args.log_dir) > 0 and not args.eval:
+ os.makedirs(args.log_dir, exist_ok=True)
+ log_writer = SummaryWriter(log_dir=args.log_dir)
+ else:
+ log_writer = None
+
+ data_loader_train = torch.utils.data.DataLoader(
+ dataset_train, sampler=sampler_train,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ pin_memory=args.pin_mem,
+ drop_last=True,
+ )
+
+ data_loader_val = torch.utils.data.DataLoader(
+ dataset_val, sampler=sampler_val,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ pin_memory=args.pin_mem,
+ drop_last=False
+ )
+
+ model = models_vit.__dict__[args.model](
+ num_classes=args.nb_classes,
+ global_pool=args.global_pool,
+ )
+
+ if args.finetune and not args.eval:
+ checkpoint = torch.load(args.finetune, map_location='cpu')
+
+ print("Load pre-trained checkpoint from: %s" % args.finetune)
+ checkpoint_model = checkpoint['model']
+ state_dict = model.state_dict()
+ for k in ['head.weight', 'head.bias']:
+ if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
+ print(f"Removing key {k} from pretrained checkpoint")
+ del checkpoint_model[k]
+
+ # interpolate position embedding
+ interpolate_pos_embed(model, checkpoint_model)
+
+ # load pre-trained model
+ msg = model.load_state_dict(checkpoint_model, strict=False)
+ print(msg)
+
+ if args.global_pool:
+ assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
+ else:
+ assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
+
+ # manually initialize fc layer: following MoCo v3
+ trunc_normal_(model.head.weight, std=0.01)
+
+    # for linear probe only
+ # hack: revise model's head with BN
+ model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
+ # freeze all but the head
+ for _, p in model.named_parameters():
+ p.requires_grad = False
+ for _, p in model.head.named_parameters():
+ p.requires_grad = True
+
+ model.to(device)
+
+ model_without_ddp = model
+ n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+ print("Model = %s" % str(model_without_ddp))
+ print('number of params (M): %.2f' % (n_parameters / 1.e6))
+
+ eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
+
+ if args.lr is None: # only base_lr is specified
+ args.lr = args.blr * eff_batch_size / 256
+
+ print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
+ print("actual lr: %.2e" % args.lr)
+
+ print("accumulate grad iterations: %d" % args.accum_iter)
+ print("effective batch size: %d" % eff_batch_size)
+
+ if args.distributed:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
+ model_without_ddp = model.module
+
+ optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
+ print(optimizer)
+ loss_scaler = NativeScaler()
+
+ criterion = torch.nn.CrossEntropyLoss()
+
+ print("criterion = %s" % str(criterion))
+
+ misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
+
+ if args.eval:
+ test_stats = evaluate(data_loader_val, model, device)
+ print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
+ exit(0)
+
+ print(f"Start training for {args.epochs} epochs")
+ start_time = time.time()
+ max_accuracy = 0.0
+ for epoch in range(args.start_epoch, args.epochs):
+ if args.distributed:
+ data_loader_train.sampler.set_epoch(epoch)
+ train_stats = train_one_epoch(
+ model, criterion, data_loader_train,
+ optimizer, device, epoch, loss_scaler,
+ max_norm=None,
+ log_writer=log_writer,
+ args=args
+ )
+ if args.output_dir:
+ misc.save_model_latest(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ if args.output_dir and (epoch % 10 == 0 or epoch + 1 == args.epochs):
+ misc.save_model(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ test_stats = evaluate(data_loader_val, model, device)
+ print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
+ max_accuracy = max(max_accuracy, test_stats["acc1"])
+ print(f'Max accuracy: {max_accuracy:.2f}%')
+
+ if log_writer is not None:
+ log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
+ log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
+ log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
+
+ log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
+ **{f'test_{k}': v for k, v in test_stats.items()},
+ 'epoch': epoch,
+ 'n_parameters': n_parameters}
+
+ if args.output_dir and misc.is_main_process():
+ if log_writer is not None:
+ log_writer.flush()
+ with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
+ f.write(json.dumps(log_stats) + "\n")
+
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+ print('Training time {}'.format(total_time_str))
+
+
+if __name__ == '__main__':
+ args = get_args_parser()
+ args = args.parse_args()
+ if args.output_dir:
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
+ main(args)
diff --git a/main_pretrain.py b/main_pretrain.py
new file mode 100644
index 0000000..ac0e2d3
--- /dev/null
+++ b/main_pretrain.py
@@ -0,0 +1,253 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+import argparse
+import datetime
+import json
+import numpy as np
+import os
+import time
+from pathlib import Path
+
+import torch
+import torch.backends.cudnn as cudnn
+from torch.utils.tensorboard import SummaryWriter
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+
+import timm
+
+assert timm.__version__ == "0.3.2" # version check
+import timm.optim.optim_factory as optim_factory
+
+import util.misc as misc
+from util.misc import NativeScalerWithGradNormCount as NativeScaler
+
+import models_pretrain as models_mae
+
+from engine_pretrain import train_one_epoch
+from util.datasets import ImageNet1k_JPG
+
+
+def get_args_parser():
+ parser = argparse.ArgumentParser('MAE pre-training', add_help=False)
+
+ # Add new args
+    parser.add_argument('--loss_weights', default="mean", type=str,
+                        help='Per-block loss weighting: mean, out, linear_decay, or top{K}/mid{K}.')
+    parser.add_argument('--mask_type', default="random", type=str,
+                        help='Mask type: random or attention.')
+    parser.add_argument('--fusion_type', default="simple", type=str,
+                        help='Fusion type in distillation: simple, linear, or sum.')
+    parser.add_argument('--target_norm', default="none", type=str,
+                        help='Normalization applied to teacher targets: none, l2, whiten, or bn.')
+    parser.add_argument('--loss_type', default="l2", type=str,
+                        help='Loss type for feature reconstruction: l2, l1, or smoothl1.')
+    parser.add_argument('--head_type', default="linear", type=str,
+                        help='Head type for feature reconstruction: linear, norm_linear, mlp, or mlp2.')
+    parser.add_argument('--teacher_model', default="openai/clip-vit-base-patch16", type=str,
+                        help='Teacher model for feature reconstruction.')
+
+ parser.add_argument('--batch_size', default=64, type=int,
+                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
+ parser.add_argument('--epochs', default=400, type=int)
+ parser.add_argument('--accum_iter', default=1, type=int,
+ help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
+
+ # Model parameters
+ parser.add_argument('--model', default='mae_vit_large_patch16', type=str, metavar='MODEL',
+ help='Name of model to train')
+
+ parser.add_argument('--input_size', default=224, type=int,
+ help='images input size')
+
+ parser.add_argument('--mask_ratio', default=0.75, type=float,
+ help='Masking ratio (percentage of removed patches).')
+
+ parser.add_argument('--norm_pix_loss', action='store_true',
+ help='Use (per-patch) normalized pixels as targets for computing loss')
+ parser.set_defaults(norm_pix_loss=False)
+
+ parser.add_argument('--drop_path', type=float, default=0.,
+ help='drop path rate (default: 0.)')
+
+ # Optimizer parameters
+ parser.add_argument('--weight_decay', type=float, default=0.05,
+ help='weight decay (default: 0.05)')
+
+ parser.add_argument('--lr', type=float, default=None, metavar='LR',
+ help='learning rate (absolute lr)')
+ parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
+ help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
+ parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
+ help='lower lr bound for cyclic schedulers that hit 0')
+
+ parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
+ help='epochs to warmup LR')
+
+ # Dataset parameters
+ parser.add_argument('--data_path', default='/mnt/petrelfs/share/imagenet/images', type=str,
+ help='dataset path')
+
+ parser.add_argument('--output_dir', default='',
+ help='path where to save, empty for no saving')
+ parser.add_argument('--log_dir', default='',
+ help='path where to tensorboard log')
+ parser.add_argument('--device', default='cuda',
+ help='device to use for training / testing')
+ parser.add_argument('--seed', default=0, type=int)
+ parser.add_argument('--resume', default='',
+ help='resume from checkpoint')
+ parser.add_argument('--auto_resume', action='store_true')
+ parser.set_defaults(auto_resume=True)
+
+ parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
+ help='start epoch')
+ parser.add_argument('--num_workers', default=10, type=int)
+ parser.add_argument('--pin_mem', action='store_true',
+ help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
+ parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
+ parser.set_defaults(pin_mem=True)
+
+ # distributed training parameters
+ parser.add_argument('--world_size', default=1, type=int,
+ help='number of distributed processes')
+ parser.add_argument('--local_rank', default=-1, type=int)
+ parser.add_argument('--dist_on_itp', action='store_true')
+ parser.add_argument('--dist_url', default='env://',
+ help='url used to set up distributed training')
+
+ return parser
+
+
+def main(args):
+ misc.init_distributed_mode(args)
+
+ print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
+ print("{}".format(args).replace(', ', ',\n'))
+
+ device = torch.device(args.device)
+
+ # fix the seed for reproducibility
+ seed = args.seed + misc.get_rank()
+ torch.manual_seed(seed)
+ np.random.seed(seed)
+
+ cudnn.benchmark = True
+
+ # simple augmentation
+ transform_train = transforms.Compose([
+ transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), # 3 is bicubic
+ transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+ dataset_train = ImageNet1k_JPG(image_root=os.path.join(args.data_path, 'train'), meta_path=os.path.join(args.data_path, 'meta', 'train.txt'), transform=transform_train)
+ print(dataset_train)
+
+ if True: # args.distributed:
+ num_tasks = misc.get_world_size()
+ global_rank = misc.get_rank()
+ sampler_train = torch.utils.data.DistributedSampler(
+ dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
+ )
+ print("Sampler_train = %s" % str(sampler_train))
+ else:
+ sampler_train = torch.utils.data.RandomSampler(dataset_train)
+
+ if global_rank == 0 and args.log_dir is not None and len(args.log_dir) > 0:
+ os.makedirs(args.log_dir, exist_ok=True)
+ log_writer = SummaryWriter(log_dir=args.log_dir)
+ else:
+ log_writer = None
+
+ data_loader_train = torch.utils.data.DataLoader(
+ dataset_train, sampler=sampler_train,
+ batch_size=args.batch_size,
+ num_workers=args.num_workers,
+ pin_memory=args.pin_mem,
+ drop_last=True,
+ )
+
+ # define the model
+    model = models_mae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss, drop_path_rate=args.drop_path,
+                                            loss_weights=args.loss_weights, loss_type=args.loss_type,
+                                            mask_type=args.mask_type, fusion_type=args.fusion_type, target_norm=args.target_norm,
+                                            head_type=args.head_type, teacher_model=args.teacher_model)
+
+ model.to(device)
+
+ model_without_ddp = model
+ print("Model = %s" % str(model_without_ddp))
+
+ eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
+
+ if args.lr is None: # only base_lr is specified
+ args.lr = args.blr * eff_batch_size / 256
+
+ print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
+ print("actual lr: %.2e" % args.lr)
+
+ print("accumulate grad iterations: %d" % args.accum_iter)
+ print("effective batch size: %d" % eff_batch_size)
+
+ if args.distributed:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
+ model_without_ddp = model.module
+
+ # following timm: set wd as 0 for bias and norm layers
+ param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay, skip_list=["distill_weights"])
+ optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
+ print(optimizer)
+ loss_scaler = NativeScaler()
+
+ misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
+
+ print(f"Start training for {args.epochs} epochs")
+ start_time = time.time()
+ for epoch in range(args.start_epoch, args.epochs):
+ if args.distributed:
+ data_loader_train.sampler.set_epoch(epoch)
+ train_stats = train_one_epoch(
+ model, data_loader_train,
+ optimizer, device, epoch, loss_scaler,
+ log_writer=log_writer,
+ args=args
+ )
+ if args.output_dir:
+ misc.save_model_latest(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ if args.output_dir and (epoch % 50 == 0 or epoch + 1 == args.epochs):
+ misc.save_model(
+ args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+ loss_scaler=loss_scaler, epoch=epoch)
+
+ log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
+ 'epoch': epoch,}
+
+ if args.output_dir and misc.is_main_process():
+ if log_writer is not None:
+ log_writer.flush()
+ with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
+ f.write(json.dumps(log_stats) + "\n")
+
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+ print('Training time {}'.format(total_time_str))
+
+
+if __name__ == '__main__':
+ args = get_args_parser()
+ args = args.parse_args()
+ if args.output_dir:
+ Path(args.output_dir).mkdir(parents=True, exist_ok=True)
+ main(args)
diff --git a/models_pretrain.py b/models_pretrain.py
new file mode 100644
index 0000000..4ded48f
--- /dev/null
+++ b/models_pretrain.py
@@ -0,0 +1,392 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+
+from functools import partial
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from timm.models.vision_transformer import PatchEmbed, Block
+
+from util.pos_embed import get_2d_sincos_pos_embed
+from transformers import CLIPVisionModel, ViTModel
+
+
+def resize_pos_embed(x):
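+    # Used for clip-vit-large-patch14: at 224px, patch size 14 yields a 16x16 grid
+    # (256 tokens); bicubic-interpolate to the student's 14x14 grid (196 tokens).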
+ # [256, C] -> [196, C]
+ C = x.shape[-1]
+ x = x.reshape(1, 16, 16, C).permute(0, 3, 1, 2)
+ x = F.interpolate(x, (14, 14), mode='bicubic', align_corners=False)
+ x = x.permute(0, 2, 3, 1).reshape(196, C)
+ return x
+
+
+class MaskedAutoencoderViT(nn.Module):
+ """ Masked Autoencoder with VisionTransformer backbone
+ """
+ def __init__(self, img_size=224, patch_size=16, in_chans=3,
+ embed_dim=1024, depth=24, num_heads=16, drop_path_rate=0.,
+ mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False,
+ loss_weights="mean", mask_type="random", fusion_type="simple", target_norm="none", loss_type="l2",
+ head_type="linear", teacher_model="openai/clip-vit-base-patch16"):
+ super().__init__()
+
+ assert loss_weights in ["mean", "out", "linear_decay"] or "top" in loss_weights or "mid" in loss_weights
+ self.loss_weights = loss_weights
+ assert mask_type in ["random", "attention"]
+ self.mask_type = mask_type
+ assert fusion_type in ["simple", "linear", "sum"]
+ self.fusion_type = fusion_type
+ assert target_norm in ["none", "l2", "whiten", "bn"]
+ self.target_norm = target_norm
+ assert loss_type in ["l2", "l1", "smoothl1"]
+ self.loss_type = loss_type
+ assert head_type in ["linear", "norm_linear", "mlp", "mlp2"]
+        self.head_type = head_type
+ # assert "clip" in teacher_model or "dino" in teacher_model
+ self.teacher_model_name = teacher_model
+
+ # --------------------------------------------------------------------------
+ # MAE encoder specifics
+ self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
+ num_patches = self.patch_embed.num_patches
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False) # fixed sin-cos embedding
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
+ self.blocks = nn.ModuleList([
+ Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer, drop_path=dpr[i])
+ for i in range(depth)])
+ self.norm = norm_layer(embed_dim)
+
+ if "clip-vit-base-patch16" in self.teacher_model_name or "dino-vitb16" in self.teacher_model_name:
+ target_dim = 768
+ teacher_depth = 12
+ else:
+ target_dim = 1024
+ teacher_depth = 24
+
+ if self.head_type == "linear":
+ self.distill_heads = nn.ModuleList([nn.Linear(embed_dim, target_dim) for i in range(teacher_depth)])
+ elif self.head_type == "norm_linear":
+ self.distill_heads = nn.ModuleList([nn.Sequential(
+ norm_layer(embed_dim),
+ nn.Linear(embed_dim, target_dim)
+ )
+ for i in range(teacher_depth)])
+ elif self.head_type == "mlp":
+ self.distill_heads = nn.ModuleList([nn.Sequential(
+ nn.Linear(embed_dim, embed_dim),
+ nn.GELU(),
+ nn.Linear(embed_dim, target_dim)
+ )
+ for i in range(teacher_depth)])
+ elif self.head_type == "mlp2":
+ self.distill_heads = nn.ModuleList([nn.Sequential(
+ nn.Linear(embed_dim, embed_dim),
+ norm_layer(embed_dim),
+ nn.Linear(embed_dim, target_dim)
+ )
+ for i in range(teacher_depth)])
+
+ if self.fusion_type == "linear":
+            # identity-initialized mixing over student blocks; valid only when len(self.blocks) == teacher_depth
+ self.distill_weights = nn.Parameter(torch.eye(len(self.blocks)) + 0.01, requires_grad=True)
+ elif self.fusion_type == "sum":
+ self.distill_weights = nn.Parameter(torch.ones(teacher_depth, len(self.blocks)) / len(self.blocks), requires_grad=True)
+
+ self.initialize_weights()
+
+ if "clip" in self.teacher_model_name:
+ self.clip_model = CLIPVisionModel.from_pretrained(self.teacher_model_name)
+ for name, param in self.clip_model.named_parameters():
+ param.requires_grad = False
+ if "clip-vit-large-patch14" in self.teacher_model_name and "position_embedding" in name:
+ param.data = torch.cat([param.data[:1], resize_pos_embed(param.data[1:])], dim=0)
+ if "clip-vit-large-patch14" in self.teacher_model_name:
+ self.clip_model.vision_model.embeddings.position_ids = torch.arange(197).expand((1, -1))
+
+ elif "dino" in self.teacher_model_name:
+ self.dino_model = ViTModel.from_pretrained(self.teacher_model_name)
+ for param in self.dino_model.parameters():
+ param.requires_grad = False
+
+ def initialize_weights(self):
+ # initialization
+ # initialize (and freeze) pos_embed by sin-cos embedding
+ pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
+ self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
+
+ # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
+ w = self.patch_embed.proj.weight.data
+ torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+
+ # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
+ torch.nn.init.normal_(self.cls_token, std=.02)
+ # torch.nn.init.normal_(self.mask_token, std=.02)
+
+ # initialize nn.Linear and nn.LayerNorm
+ self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ # we use xavier_uniform following official JAX ViT:
+ torch.nn.init.xavier_uniform_(m.weight)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ def denormalize(self, images, type="imagenet"):
+ # sr_images [B, 3, H, W]
+ mean = torch.tensor([0.485, 0.456, 0.406], device=images.device).view(1, 3, 1, 1).type_as(images)
+ std = torch.tensor([0.229, 0.224, 0.225], device=images.device).view(1, 3, 1, 1).type_as(images)
+        return std * images + mean
+
+ def normalize(self, images, type="clip"):
+ # images [B, 3, h, w]
+ mean = torch.tensor([0.48145466, 0.4578275, 0.40821073], device=images.device).view(1, 3, 1, 1).type_as(images)
+ std = torch.tensor([0.26862954, 0.26130258, 0.27577711], device=images.device).view(1, 3, 1, 1).type_as(images)
+ return (images - mean) / std
+
+ def patchify(self, imgs):
+ """
+ imgs: (N, 3, H, W)
+ x: (N, L, patch_size**2 *3)
+ """
+ p = self.patch_embed.patch_size[0]
+ assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
+
+ h = w = imgs.shape[2] // p
+ x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
+ x = torch.einsum('nchpwq->nhwpqc', x)
+ x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
+ return x
+
+ def unpatchify(self, x):
+ """
+ x: (N, L, patch_size**2 *3)
+ imgs: (N, 3, H, W)
+ """
+ p = self.patch_embed.patch_size[0]
+ h = w = int(x.shape[1]**.5)
+ assert h * w == x.shape[1]
+
+ x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
+ x = torch.einsum('nhwpqc->nchpwq', x)
+ imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
+ return imgs
+
+ def random_masking(self, x, mask_ratio):
+ """
+ Perform per-sample random masking by per-sample shuffling.
+ Per-sample shuffling is done by argsort random noise.
+ x: [N, L, D], sequence
+ """
+ N, L, D = x.shape # batch, length, dim
+ len_keep = int(L * (1 - mask_ratio))
+
+ noise = torch.rand(N, L, device=x.device) # noise in [0, 1]
+
+ # sort noise for each sample
+ ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
+ ids_restore = torch.argsort(ids_shuffle, dim=1)
+
+ # keep the first subset
+ ids_keep = ids_shuffle[:, :len_keep]
+ x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
+
+ # generate the binary mask: 0 is keep, 1 is remove
+ mask = torch.ones([N, L], device=x.device)
+ mask[:, :len_keep] = 0
+ # unshuffle to get the binary mask
+ mask = torch.gather(mask, dim=1, index=ids_restore)
+
+ return x_masked, ids_keep
+
+    def attention_masking(self, x, mask_ratio, importance):
+        """
+        Perform per-sample attention-guided masking: token indices are sampled
+        without replacement with probability proportional to `importance`, so
+        patches the teacher attends to are more likely to be kept.
+        x: [N, L, D], sequence
+        """
+ N, L, D = x.shape # batch, length, dim
+ len_keep = int(L * (1 - mask_ratio))
+
+ noise = importance.to(x.device) # large is keep, small is remove
+
+ # sort noise for each sample
+ ids_shuffle = torch.multinomial(noise, L, replacement=False)
+ ids_restore = torch.argsort(ids_shuffle, dim=1)
+
+ # keep the first subset
+ ids_keep = ids_shuffle[:, :len_keep]
+ x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
+
+ # generate the binary mask: 0 is keep, 1 is remove
+ mask = torch.ones([N, L], device=x.device)
+ mask[:, :len_keep] = 0
+ # unshuffle to get the binary mask
+ mask = torch.gather(mask, dim=1, index=ids_restore)
+
+ return x_masked, ids_keep
+
+ def forward_encoder(self, x, mask_ratio, attentions):
+ # embed patches
+ x = self.patch_embed(x)
+
+ # add pos embed w/o cls token
+ x = x + self.pos_embed[:, 1:, :]
+
+ # masking: length -> length * mask_ratio
+ if self.mask_type == "attention":
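+            # patch importance = teacher's last-layer [CLS]-to-patch attention,
+            # averaged over heads -> [N, L]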
+ importance = attentions[-1][:, :, 0, 1:].mean(1)
+ x, ids_keep = self.attention_masking(x, mask_ratio, importance)
+ else:
+ x, ids_keep = self.random_masking(x, mask_ratio)
+
+ cls_token = self.cls_token + self.pos_embed[:, :1, :]
+ cls_tokens = cls_token.expand(x.shape[0], -1, -1)
+ x = torch.cat((cls_tokens, x), dim=1)
+
+ hidden_states = []
+ # apply Transformer blocks
+ for blk in self.blocks:
+ x = blk(x)
+ hidden_states.append(x)
+ x = self.norm(x)
+
+ return hidden_states, ids_keep
+
+ @torch.no_grad()
+ def forward_clip(self, x):
+ if "clip-vit-large-patch14" in self.teacher_model_name:
+ x = F.interpolate(x, (196, 196), mode='bicubic', align_corners=False)
+
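+        # the batch arrives ImageNet-normalized; re-standardize with CLIP statistics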
+ x = self.normalize(self.denormalize(x))
+ input = {
+ "pixel_values": x,
+ "output_hidden_states": True,
+ "output_attentions": True
+ }
+ outputs = self.clip_model(**input)
+
+ last_hidden_state, pooler_output, hidden_states, attentions = outputs[0], outputs[1], outputs[2], outputs[3]
+ return last_hidden_state, pooler_output, hidden_states, attentions
+
+ @torch.no_grad()
+ def forward_dino(self, x):
+ input = {
+ "pixel_values": x,
+ "output_hidden_states": True,
+ "output_attentions": True
+ }
+ outputs = self.dino_model(**input)
+
+ last_hidden_state, pooler_output, hidden_states, attentions = outputs[0], outputs[1], outputs[2], outputs[3]
+ return last_hidden_state, pooler_output, hidden_states, attentions
+
+
+ def get_student(self, hidden_states):
+ student = hidden_states
+ if self.fusion_type != "simple":
+ student = [x.unsqueeze(0) for x in student]
+ student = torch.cat(student, dim=0)
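+            # mix the stacked hidden states [depth, B, L, C] with the learned
+            # fusion matrix [targets, depth] -> [targets, B, L, C]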
+ student = torch.einsum('ab,bcde->acde', self.distill_weights, student)
+ student = torch.chunk(student, student.shape[0], dim=0)
+ student = [x.squeeze(0) for x in student]
+ student = [self.distill_heads[i](x) for i, x in enumerate(student)]
+ return student
+
+ def get_teacher(self, hidden_states, ids_keep):
+ teacher = []
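+        # hidden_states[0] is the embedding output; only transformer layer
+        # outputs serve as distillation targets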
+ for i in range(1, len(hidden_states)):
+ y = hidden_states[i]
+ if self.target_norm == "l2":
+ y = F.normalize(y, dim=-1)
+ elif self.target_norm == "whiten":
+ y = F.layer_norm(y, (y.shape[-1],))
+ elif self.target_norm == "bn":
+ y = (y - y.mean()) / (y.var() + 1.e-6)**.5
+ cls = y[:, :1, :]
+ y = y[:, 1:, :]
+ y = torch.gather(y, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, y.shape[-1]))
+ teacher.append(torch.cat([cls, y], dim=1))
+ return teacher
+
+    def forward_loss(self, student, teacher):
+        """
+        student: list of [B, 1+len_keep, C] projected student features
+        teacher: list of [B, 1+len_keep, C] targets (cls token + visible patches)
+        """
+ loss = torch.tensor(0., device=student[0].device)
+
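+        # per-layer weights: "mean" averages all layers, "out" uses only the last,
+        # "topN" averages the last N, "midN" selects layer N, and "linear_decay"
+        # weights layers linearly by depth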
+ if self.loss_weights == "mean":
+ weight_list = [1/len(student)]*len(student)
+ elif self.loss_weights == "out":
+ weight_list = [0.]*(len(student)-1) + [1.]
+ elif self.loss_weights == "linear_decay":
+ weight_list_ = list(range(len(student)))
+ weight_list = [i / sum(weight_list_) for i in weight_list_]
+ elif "top" in self.loss_weights: # topk
+ topk = int(self.loss_weights[3:])
+ weight_list = [0.] * (len(student)-topk) + [1/topk] * topk
+ elif "mid" in self.loss_weights:
+ mid = int(self.loss_weights[3:])
+ weight_list = [0.] * mid + [1.] + [0.] * (len(student) - mid - 1)
+
+ for i, x in enumerate(student):
+ y = teacher[i]
+ if weight_list[i] > 0:
+ if self.loss_type == "l2":
+ loss = loss + weight_list[i] * ((y - x) ** 2).mean()
+ elif self.loss_type == "smoothl1":
+ loss = loss + weight_list[i] * 2 * F.smooth_l1_loss(y, x)
+ elif self.loss_type == "l1":
+ loss = loss + weight_list[i] * F.l1_loss(y, x)
+ return loss
+
+ def forward(self, imgs, mask_ratio=0.75):
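+        # run the teacher first: its attention maps guide the student's masking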
+ if "clip" in self.teacher_model_name:
+ _, _, hidden_states_teacher, attentions = self.forward_clip(imgs)
+ elif "dino" in self.teacher_model_name:
+ _, _, hidden_states_teacher, attentions = self.forward_dino(imgs)
+ hidden_states, ids_keep = self.forward_encoder(imgs, mask_ratio, attentions)
+ student = self.get_student(hidden_states)
+ teacher = self.get_teacher(hidden_states_teacher, ids_keep)
+ loss = self.forward_loss(student, teacher)
+ return loss
+
+
+def mae_vit_base_patch16(**kwargs):
+ model = MaskedAutoencoderViT(
+ patch_size=16, embed_dim=768, depth=12, num_heads=12,
+ mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+ return model
+
+
+def mae_vit_large_patch16(**kwargs):
+ model = MaskedAutoencoderViT(
+ patch_size=16, embed_dim=1024, depth=24, num_heads=16,
+ mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+ return model
+
\ No newline at end of file
diff --git a/models_vit.py b/models_vit.py
new file mode 100644
index 0000000..e94591f
--- /dev/null
+++ b/models_vit.py
@@ -0,0 +1,96 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+
+from functools import partial
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import timm.models.vision_transformer
+
+
+class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
+ """ Vision Transformer with support for global average pooling
+ """
+ def __init__(self, global_pool=False, **kwargs):
+ super(VisionTransformer, self).__init__(**kwargs)
+
+ self.global_pool = global_pool
+ if self.global_pool:
+ norm_layer = kwargs['norm_layer']
+ embed_dim = kwargs['embed_dim']
+ self.fc_norm = norm_layer(embed_dim)
+
+ del self.norm # remove the original norm
+
+ def forward_features(self, x):
+ B = x.shape[0]
+ x = self.patch_embed(x)
+
+ cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
+ x = torch.cat((cls_tokens, x), dim=1)
+ x = x + self.pos_embed
+ x = self.pos_drop(x)
+
+ for blk in self.blocks:
+ x = blk(x)
+
+ if self.global_pool:
+ x = x[:, 1:, :].mean(dim=1) # global pool without cls token
+ outcome = self.fc_norm(x)
+ else:
+ x = self.norm(x)
+ outcome = x[:, 0]
+
+ return outcome
+
+ def extract_features(self, x):
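+        # same as forward_features, but skips fc_norm / the final LayerNorm so
+        # raw features can be consumed downstream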
+ B = x.shape[0]
+ x = self.patch_embed(x)
+
+ cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
+ x = torch.cat((cls_tokens, x), dim=1)
+ x = x + self.pos_embed
+ x = self.pos_drop(x)
+
+ for blk in self.blocks:
+ x = blk(x)
+
+ if self.global_pool:
+ x = x[:, 1:, :].mean(dim=1) # global pool without cls token
+ else:
+ x = x[:, 0]
+
+ return x
+
+
+def vit_base_patch16(**kwargs):
+ model = VisionTransformer(
+ patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+ return model
+
+
+def vit_large_patch16(**kwargs):
+ model = VisionTransformer(
+ patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+ return model
+
+
+def vit_huge_patch14(**kwargs):
+ model = VisionTransformer(
+ patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+ return model
+
+
+
diff --git a/util/crop.py b/util/crop.py
new file mode 100644
index 0000000..fcb2612
--- /dev/null
+++ b/util/crop.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+
+import torch
+
+from torchvision import transforms
+from torchvision.transforms import functional as F
+
+
+class RandomResizedCrop(transforms.RandomResizedCrop):
+ """
+ RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
+    This may lead to results different from torchvision's version.
+ Following BYOL's TF code:
+ https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
+ """
+ @staticmethod
+ def get_params(img, scale, ratio):
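+        # F._get_image_size is a private torchvision helper (renamed to
+        # F.get_image_size in newer torchvision releases)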
+ width, height = F._get_image_size(img)
+ area = height * width
+
+ target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
+ log_ratio = torch.log(torch.tensor(ratio))
+ aspect_ratio = torch.exp(
+ torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
+ ).item()
+
+ w = int(round(math.sqrt(target_area * aspect_ratio)))
+ h = int(round(math.sqrt(target_area / aspect_ratio)))
+
+ w = min(w, width)
+ h = min(h, height)
+
+ i = torch.randint(0, height - h + 1, size=(1,)).item()
+ j = torch.randint(0, width - w + 1, size=(1,)).item()
+
+ return i, j, h, w
\ No newline at end of file
diff --git a/util/datasets.py b/util/datasets.py
new file mode 100644
index 0000000..b59e5ea
--- /dev/null
+++ b/util/datasets.py
@@ -0,0 +1,94 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+
+import os
+import PIL
+import torch
+from PIL import Image
+from torchvision import transforms
+
+from timm.data import create_transform
+from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+
+
+class ImageNet1k_JPG(torch.utils.data.Dataset):
+    '''
+    ImageNet-1k dataset that reads JPEG files listed in a meta file,
+    one "<relative/path> <label>" pair per line.
+    '''
+
+ def __init__(self, image_root, meta_path, transform):
+ self.transform = transform
+
+ with open(meta_path) as f:
+ self.data_list = f.read().splitlines()
+ self.image_root = image_root
+
+ def __len__(self):
+ return len(self.data_list)
+
+ def __getitem__(self, idx):
+ line = self.data_list[idx]
+ path, label = line.split(' ')
+
+ path = os.path.join(self.image_root, path)
+ label = int(label)
+
+ image = Image.open(path).convert('RGB')
+ image = self.transform(image)
+
+ return image, label
+
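+# illustrative meta layout: ${data_path}/meta/train.txt with lines such as
+# "n01440764/n01440764_10026.JPEG 0" (paths relative to the train/val image root)
+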
+def build_dataset_jpg(is_train, args):
+ transform = build_transform(is_train, args)
+ data_root = args.data_path
+ image_root = os.path.join(data_root, 'train' if is_train else 'val')
+ meta_path = os.path.join(data_root, 'meta', 'train.txt' if is_train else 'val.txt')
+ dataset = ImageNet1k_JPG(image_root, meta_path, transform)
+ print(f"Dataset at {meta_path}. Length of {len(dataset)}")
+ return dataset
+
+def build_transform(is_train, args):
+ mean = IMAGENET_DEFAULT_MEAN
+ std = IMAGENET_DEFAULT_STD
+ # train transform
+ if is_train:
+ # this should always dispatch to transforms_imagenet_train
+ transform = create_transform(
+ input_size=args.input_size,
+ is_training=True,
+ color_jitter=args.color_jitter,
+ auto_augment=args.aa,
+ interpolation='bicubic',
+ re_prob=args.reprob,
+ re_mode=args.remode,
+ re_count=args.recount,
+ mean=mean,
+ std=std,
+ )
+ return transform
+
+ # eval transform
+ t = []
+ if args.input_size <= 224:
+ crop_pct = 224 / 256
+ else:
+ crop_pct = 1.0
+ size = int(args.input_size / crop_pct)
+ t.append(
+ transforms.Resize(size, interpolation=PIL.Image.BICUBIC), # to maintain same ratio w.r.t. 224 images
+ )
+ t.append(transforms.CenterCrop(args.input_size))
+
+ t.append(transforms.ToTensor())
+ t.append(transforms.Normalize(mean, std))
+ return transforms.Compose(t)
+
diff --git a/util/lars.py b/util/lars.py
new file mode 100644
index 0000000..509c5f6
--- /dev/null
+++ b/util/lars.py
@@ -0,0 +1,47 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# LARS optimizer, implementation from MoCo v3:
+# https://github.com/facebookresearch/moco-v3
+# --------------------------------------------------------
+
+import torch
+
+
+class LARS(torch.optim.Optimizer):
+ """
+ LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
+ """
+ def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
+ defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
+ super().__init__(params, defaults)
+
+ @torch.no_grad()
+ def step(self):
+ for g in self.param_groups:
+ for p in g['params']:
+ dp = p.grad
+
+ if dp is None:
+ continue
+
+ if p.ndim > 1: # if not normalization gamma/beta or bias
+ dp = dp.add(p, alpha=g['weight_decay'])
+ param_norm = torch.norm(p)
+ update_norm = torch.norm(dp)
+ one = torch.ones_like(param_norm)
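+                    # trust ratio q = trust_coefficient * ||p|| / ||dp||,
+                    # falling back to 1 when either norm is zero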
+ q = torch.where(param_norm > 0.,
+ torch.where(update_norm > 0,
+ (g['trust_coefficient'] * param_norm / update_norm), one),
+ one)
+ dp = dp.mul(q)
+
+ param_state = self.state[p]
+ if 'mu' not in param_state:
+ param_state['mu'] = torch.zeros_like(p)
+ mu = param_state['mu']
+ mu.mul_(g['momentum']).add_(dp)
+ p.add_(mu, alpha=-g['lr'])
\ No newline at end of file
diff --git a/util/lr_decay.py b/util/lr_decay.py
new file mode 100644
index 0000000..7fa11f1
--- /dev/null
+++ b/util/lr_decay.py
@@ -0,0 +1,76 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# ELECTRA https://github.com/google-research/electra
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+
+import json
+
+
+def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=.75):
+ """
+ Parameter groups for layer-wise lr decay
+ Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
+ """
+ param_group_names = {}
+ param_groups = {}
+
+ num_layers = len(model.blocks) + 1
+
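+    # layer_scales[i] = layer_decay ** (num_layers - i): earlier layers get smaller lr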
+ layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1))
+
+ for n, p in model.named_parameters():
+ if not p.requires_grad:
+ continue
+
+ # no decay: all 1D parameters and model specific ones
+ if p.ndim == 1 or n in no_weight_decay_list:
+ g_decay = "no_decay"
+ this_decay = 0.
+ else:
+ g_decay = "decay"
+ this_decay = weight_decay
+
+ layer_id = get_layer_id_for_vit(n, num_layers)
+ group_name = "layer_%d_%s" % (layer_id, g_decay)
+
+ if group_name not in param_group_names:
+ this_scale = layer_scales[layer_id]
+
+ param_group_names[group_name] = {
+ "lr_scale": this_scale,
+ "weight_decay": this_decay,
+ "params": [],
+ }
+ param_groups[group_name] = {
+ "lr_scale": this_scale,
+ "weight_decay": this_decay,
+ "params": [],
+ }
+
+ param_group_names[group_name]["params"].append(n)
+ param_groups[group_name]["params"].append(p)
+
+ # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
+
+ return list(param_groups.values())
+
+
+def get_layer_id_for_vit(name, num_layers):
+ """
+ Assign a parameter with its layer id
+ Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
+ """
+ if name in ['cls_token', 'pos_embed']:
+ return 0
+ elif name.startswith('patch_embed'):
+ return 0
+ elif name.startswith('blocks'):
+ return int(name.split('.')[1]) + 1
+ else:
+ return num_layers
\ No newline at end of file
diff --git a/util/lr_sched.py b/util/lr_sched.py
new file mode 100644
index 0000000..4cb682b
--- /dev/null
+++ b/util/lr_sched.py
@@ -0,0 +1,21 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+
+def adjust_learning_rate(optimizer, epoch, args):
+ """Decay the learning rate with half-cycle cosine after warmup"""
+ if epoch < args.warmup_epochs:
+ lr = args.lr * epoch / args.warmup_epochs
+ else:
+ lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
+ (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
+ for param_group in optimizer.param_groups:
+ if "lr_scale" in param_group:
+ param_group["lr"] = lr * param_group["lr_scale"]
+ else:
+ param_group["lr"] = lr
+ return lr
diff --git a/util/misc.py b/util/misc.py
new file mode 100644
index 0000000..0676a5c
--- /dev/null
+++ b/util/misc.py
@@ -0,0 +1,373 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# BEiT: https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+
+import builtins
+import datetime
+import os
+import glob
+import time
+from collections import defaultdict, deque
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+from math import inf  # torch._six was removed in newer PyTorch; math.inf is equivalent here
+
+class SmoothedValue(object):
+ """Track a series of values and provide access to smoothed values over a
+ window or the global series average.
+ """
+
+ def __init__(self, window_size=20, fmt=None):
+ if fmt is None:
+ fmt = "{median:.4f} ({global_avg:.4f})"
+ self.deque = deque(maxlen=window_size)
+ self.total = 0.0
+ self.count = 0
+ self.fmt = fmt
+
+ def update(self, value, n=1):
+ self.deque.append(value)
+ self.count += n
+ self.total += value * n
+
+ def synchronize_between_processes(self):
+ """
+ Warning: does not synchronize the deque!
+ """
+ if not is_dist_avail_and_initialized():
+ return
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
+ dist.barrier()
+ dist.all_reduce(t)
+ t = t.tolist()
+ self.count = int(t[0])
+ self.total = t[1]
+
+ @property
+ def median(self):
+ d = torch.tensor(list(self.deque))
+ return d.median().item()
+
+ @property
+ def avg(self):
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
+ return d.mean().item()
+
+ @property
+ def global_avg(self):
+ return self.total / self.count
+
+ @property
+ def max(self):
+ return max(self.deque)
+
+ @property
+ def value(self):
+ return self.deque[-1]
+
+ def __str__(self):
+ return self.fmt.format(
+ median=self.median,
+ avg=self.avg,
+ global_avg=self.global_avg,
+ max=self.max,
+ value=self.value)
+
+
+class MetricLogger(object):
+ def __init__(self, delimiter="\t"):
+ self.meters = defaultdict(SmoothedValue)
+ self.delimiter = delimiter
+
+ def update(self, **kwargs):
+ for k, v in kwargs.items():
+ if v is None:
+ continue
+ if isinstance(v, torch.Tensor):
+ v = v.item()
+ assert isinstance(v, (float, int))
+ self.meters[k].update(v)
+
+ def __getattr__(self, attr):
+ if attr in self.meters:
+ return self.meters[attr]
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ raise AttributeError("'{}' object has no attribute '{}'".format(
+ type(self).__name__, attr))
+
+ def __str__(self):
+ loss_str = []
+ for name, meter in self.meters.items():
+ loss_str.append(
+ "{}: {}".format(name, str(meter))
+ )
+ return self.delimiter.join(loss_str)
+
+ def synchronize_between_processes(self):
+ for meter in self.meters.values():
+ meter.synchronize_between_processes()
+
+ def add_meter(self, name, meter):
+ self.meters[name] = meter
+
+ def log_every(self, iterable, print_freq, header=None):
+ i = 0
+ if not header:
+ header = ''
+ start_time = time.time()
+ end = time.time()
+ iter_time = SmoothedValue(fmt='{avg:.4f}')
+ data_time = SmoothedValue(fmt='{avg:.4f}')
+ space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
+ log_msg = [
+ header,
+ '[{0' + space_fmt + '}/{1}]',
+ 'eta: {eta}',
+ '{meters}',
+ 'time: {time}',
+ 'data: {data}'
+ ]
+ if torch.cuda.is_available():
+ log_msg.append('max mem: {memory:.0f}')
+ log_msg = self.delimiter.join(log_msg)
+ MB = 1024.0 * 1024.0
+ for obj in iterable:
+ data_time.update(time.time() - end)
+ yield obj
+ iter_time.update(time.time() - end)
+ if i % print_freq == 0 or i == len(iterable) - 1:
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+ if torch.cuda.is_available():
+ print(log_msg.format(
+ i, len(iterable), eta=eta_string,
+ meters=str(self),
+ time=str(iter_time), data=str(data_time),
+ memory=torch.cuda.max_memory_allocated() / MB))
+ else:
+ print(log_msg.format(
+ i, len(iterable), eta=eta_string,
+ meters=str(self),
+ time=str(iter_time), data=str(data_time)))
+ i += 1
+ end = time.time()
+ total_time = time.time() - start_time
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+ print('{} Total time: {} ({:.4f} s / it)'.format(
+ header, total_time_str, total_time / len(iterable)))
+
+
+def setup_for_distributed(is_master):
+ """
+    Disable printing when this is not the master process (unless forced)
+ """
+ builtin_print = builtins.print
+
+ def print(*args, **kwargs):
+ force = kwargs.pop('force', False)
+ force = force or (get_world_size() > 8)
+ if is_master or force:
+ now = datetime.datetime.now().time()
+ builtin_print('[{}] '.format(now), end='') # print with time stamp
+ builtin_print(*args, **kwargs)
+
+ builtins.print = print
+
+
+def is_dist_avail_and_initialized():
+ if not dist.is_available():
+ return False
+ if not dist.is_initialized():
+ return False
+ return True
+
+
+def get_world_size():
+ if not is_dist_avail_and_initialized():
+ return 1
+ return dist.get_world_size()
+
+
+def get_rank():
+ if not is_dist_avail_and_initialized():
+ return 0
+ return dist.get_rank()
+
+
+def is_main_process():
+ return get_rank() == 0
+
+
+def save_on_master(*args, **kwargs):
+ if is_main_process():
+ torch.save(*args, **kwargs)
+
+
+def init_distributed_mode(args):
+ if args.dist_on_itp:
+ args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
+ args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
+ args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
+ args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
+ os.environ['LOCAL_RANK'] = str(args.gpu)
+ os.environ['RANK'] = str(args.rank)
+ os.environ['WORLD_SIZE'] = str(args.world_size)
+ # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+ elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+ args.rank = int(os.environ["RANK"])
+ args.world_size = int(os.environ['WORLD_SIZE'])
+ args.gpu = int(os.environ['LOCAL_RANK'])
+ else:
+ print('Not using distributed mode')
+ setup_for_distributed(is_master=True) # hack
+ args.distributed = False
+ return
+
+ args.distributed = True
+
+ torch.cuda.set_device(args.gpu)
+ args.dist_backend = 'nccl'
+ print('| distributed init (rank {}): {}, gpu {}'.format(
+ args.rank, args.dist_url, args.gpu), flush=True)
+ torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+ world_size=args.world_size, rank=args.rank)
+ torch.distributed.barrier()
+ setup_for_distributed(args.rank == 0)
+
+
+class NativeScalerWithGradNormCount:
+ state_dict_key = "amp_scaler"
+
+ def __init__(self):
+ self._scaler = torch.cuda.amp.GradScaler()
+
+ def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
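+        # scaled backward; on update steps, unscale the gradients, optionally
+        # clip them, then step the optimizer and update the loss scale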
+ self._scaler.scale(loss).backward(create_graph=create_graph)
+ if update_grad:
+ if clip_grad is not None:
+ assert parameters is not None
+ self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
+ norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
+ else:
+ self._scaler.unscale_(optimizer)
+ norm = get_grad_norm_(parameters)
+ self._scaler.step(optimizer)
+ self._scaler.update()
+ else:
+ norm = None
+ return norm
+
+ def state_dict(self):
+ return self._scaler.state_dict()
+
+ def load_state_dict(self, state_dict):
+ self._scaler.load_state_dict(state_dict)
+
+
+def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = [p for p in parameters if p.grad is not None]
+ norm_type = float(norm_type)
+ if len(parameters) == 0:
+ return torch.tensor(0.)
+ device = parameters[0].grad.device
+ if norm_type == inf:
+ total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
+ else:
+ total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
+ return total_norm
+
+
+def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
+ output_dir = Path(args.output_dir)
+ epoch_name = str(epoch)
+ if loss_scaler is not None:
+ checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
+ for checkpoint_path in checkpoint_paths:
+ to_save = {
+ 'model': model_without_ddp.state_dict(),
+ 'optimizer': optimizer.state_dict(),
+ 'epoch': epoch,
+ 'scaler': loss_scaler.state_dict(),
+ 'args': args,
+ }
+
+ save_on_master(to_save, checkpoint_path)
+ else:
+ client_state = {'epoch': epoch}
+ model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
+
+def save_model_latest(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
+ output_dir = Path(args.output_dir)
+ epoch_name = str(epoch)
+ if loss_scaler is not None:
+ checkpoint_paths = [output_dir / 'checkpoint-latest.pth']
+ for checkpoint_path in checkpoint_paths:
+ to_save = {
+ 'model': model_without_ddp.state_dict(),
+ 'optimizer': optimizer.state_dict(),
+ 'epoch': epoch,
+ 'scaler': loss_scaler.state_dict(),
+ 'args': args,
+ }
+
+ save_on_master(to_save, checkpoint_path)
+ else:
+ client_state = {'epoch': epoch}
+ model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-latest", client_state=client_state)
+ print("Latest checkpoint saved.")
+
+def load_model(args, model_without_ddp, optimizer, loss_scaler):
+ output_dir = Path(args.output_dir)
+ if args.auto_resume and len(args.resume) == 0:
+ if os.path.exists(os.path.join(output_dir, 'checkpoint-latest.pth')):
+ args.resume = os.path.join(output_dir, 'checkpoint-latest.pth')
+ else:
+ all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
+ latest_ckpt = -1
+ for ckpt in all_checkpoints:
+ t = ckpt.split('-')[-1].split('.')[0]
+ if t.isdigit():
+ latest_ckpt = max(int(t), latest_ckpt)
+ if latest_ckpt >= 0:
+ args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
+
+ print("Auto resume checkpoint: %s" % args.resume)
+
+ if args.resume:
+ if args.resume.startswith('https'):
+ checkpoint = torch.hub.load_state_dict_from_url(
+ args.resume, map_location='cpu', check_hash=True)
+ else:
+ checkpoint = torch.load(args.resume, map_location='cpu')
+ model_without_ddp.load_state_dict(checkpoint['model'])
+ print("Resume checkpoint %s" % args.resume)
+ if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
+ optimizer.load_state_dict(checkpoint['optimizer'])
+ args.start_epoch = checkpoint['epoch'] + 1
+ if 'scaler' in checkpoint:
+ loss_scaler.load_state_dict(checkpoint['scaler'])
+ print("With optim & sched!")
+
+
+def all_reduce_mean(x):
+ world_size = get_world_size()
+ if world_size > 1:
+ x_reduce = torch.tensor(x).cuda()
+ dist.all_reduce(x_reduce)
+ x_reduce /= world_size
+ return x_reduce.item()
+ else:
+ return x
\ No newline at end of file
diff --git a/util/pos_embed.py b/util/pos_embed.py
new file mode 100644
index 0000000..6acf8bd
--- /dev/null
+++ b/util/pos_embed.py
@@ -0,0 +1,96 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# Position embedding utils
+# --------------------------------------------------------
+
+import numpy as np
+
+import torch
+
+# --------------------------------------------------------
+# 2D sine-cosine position embedding
+# References:
+# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
+# MoCo v3: https://github.com/facebookresearch/moco-v3
+# --------------------------------------------------------
+def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+ """
+ grid_size: int of the grid height and width
+ return:
+ pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+ """
+ grid_h = np.arange(grid_size, dtype=np.float32)
+ grid_w = np.arange(grid_size, dtype=np.float32)
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
+ grid = np.stack(grid, axis=0)
+
+ grid = grid.reshape([2, 1, grid_size, grid_size])
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+ if cls_token:
+ pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+ return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+ assert embed_dim % 2 == 0
+
+ # use half of dimensions to encode grid_h
+ emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
+ emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
+
+ emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
+ return emb
+
+
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+ """
+ embed_dim: output dimension for each position
+ pos: a list of positions to be encoded: size (M,)
+ out: (M, D)
+ """
+ assert embed_dim % 2 == 0
+    omega = np.arange(embed_dim // 2, dtype=np.float64)  # np.float alias was removed in NumPy 1.24
+ omega /= embed_dim / 2.
+ omega = 1. / 10000**omega # (D/2,)
+
+ pos = pos.reshape(-1) # (M,)
+ out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
+
+ emb_sin = np.sin(out) # (M, D/2)
+ emb_cos = np.cos(out) # (M, D/2)
+
+ emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
+ return emb
+
+
+# --------------------------------------------------------
+# Interpolate position embeddings for high-resolution
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+def interpolate_pos_embed(model, checkpoint_model):
+ if 'pos_embed' in checkpoint_model:
+ pos_embed_checkpoint = checkpoint_model['pos_embed']
+ embedding_size = pos_embed_checkpoint.shape[-1]
+ num_patches = model.patch_embed.num_patches
+ num_extra_tokens = model.pos_embed.shape[-2] - num_patches
+ # height (== width) for the checkpoint position embedding
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+ # height (== width) for the new position embedding
+ new_size = int(num_patches ** 0.5)
+ # class_token and dist_token are kept unchanged
+ if orig_size != new_size:
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+ # only the position tokens are interpolated
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+ pos_tokens = torch.nn.functional.interpolate(
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+ checkpoint_model['pos_embed'] = new_pos_embed