public release
Dan Jia committed Oct 14, 2021
1 parent 8df1c8c commit 3a66e20
Showing 67 changed files with 13,407 additions and 0 deletions.
33 changes: 33 additions & 0 deletions .gitignore
@@ -0,0 +1,33 @@
*~
*__pycache__*
*_ext*
*.bag
*.csv
*.cu.o
*.DS_Store
*.gif
*.ipynb_checkpoints
*.pkl
*.png
*.pth
*.pyc
*.so
*.tfevents*
*.yml

.idea/
.vscode/
*.egg-info/
build/
ckpt/
ckpts/
dist/
devel/
experiments/
work_dirs/

data
logs
wandb
work_dirs
work_dirs_tmp
61 changes: 61 additions & 0 deletions README.md
@@ -0,0 +1,61 @@
# Person-MinkUNet

PyTorch implementation of Person-MinkUNet,
winner of the JRDB 3D detection challenge at the JRDB-ACT Workshop, CVPR 2021.
[`[arXiv]`](https://arxiv.org/abs/2107.06780)
[`[video]`](https://www.youtube.com/watch?v=RnGnONoX9cU)
[`[leaderboard]`](https://jrdb.stanford.edu/leaderboards/detection)

# Prerequisites

- `python>=3.8`
- `torchsparse==1.2.0` [(link)](https://github.com/mit-han-lab/torchsparse)
- `PyTorch==1.6.0`
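
To check the environment before building the extensions, a minimal sanity check (a sketch only; whether `torchsparse` exposes `__version__` may vary between releases):

```
# Environment sanity check (sketch): confirm the pinned packages are importable.
import torch
import torchsparse

print("PyTorch:", torch.__version__)                 # expected: 1.6.0
print("CUDA available:", torch.cuda.is_available())  # a CUDA build is required
print("torchsparse:", getattr(torchsparse, "__version__", "unknown"))  # expected: 1.2.0
```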

# Quick start

Download the [JackRabbot dataset](https://jrdb.stanford.edu/) and place it under `PROJECT/data/JRDB`.

```
# install lidar_det project
python setup.py develop
# build libraries
cd lib/iou3d
python setup.py develop
cd ../jrdb_det3d_eval
python setup.py develop
```

Then run training or evaluation:
```
python bin/train.py --cfg PATH_TO_CFG [--ckpt PATH_TO_CKPT] [--evaluation]
```
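
`PATH_TO_CFG` points to a YAML file. The sketch below shows only the top-level keys that `bin/train.py` itself reads (`dataset`, `dataloader`, `model`, `pipeline`); all values are placeholders, and the released config files in the model zoo contain additional settings consumed by the dataset, model, and pipeline code.

```
# Sketch of the config skeleton expected by bin/train.py (placeholder values only).
import yaml

cfg = {
    "dataset": {"name": "JRDB"},  # any name other than "nuScenes" selects the single-class settings
    "dataloader": {"batch_size": 1},
    "model": {
        "target_mode": "...",  # copied into the dataset config by bin/train.py
        "kwargs": {"num_anchors": 1, "num_ori_bins": 1},
    },
    "pipeline": {"Logger": {"tag": "example", "backup_list": []}},
}

with open("example_cfg.yaml", "w") as f:
    yaml.safe_dump(cfg, f)
```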

# Model zoo

| Split | Checkpoint | Config |
|-------|------------|--------|
| train | [ckpt]() | [cfg]() |
| train + val | [ckpt]() | [cfg]() |

# Acknowledgements

- torchsparse [(link)](https://github.com/mit-han-lab/torchsparse)
- PointRCNN [(link)](https://github.com/sshaoshuai/PointRCNN/tree/master/lib/utils/iou3d)

# Citation
```
@inproceedings{Jia2021PersonMinkUnet,
title = {{Person-MinkUNet: 3D Person Detection with LiDAR Point Cloud}},
author = {Dan Jia and Bastian Leibe},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
year = {2021}
}
```





108 changes: 108 additions & 0 deletions bin/train.py
@@ -0,0 +1,108 @@
import argparse
import yaml
import torch

from lidar_det.dataset import get_dataloader
from lidar_det.pipeline import Pipeline
from lidar_det.model import get_model


def run_training(model, pipeline, cfg):
    # main train loop
    train_loader = get_dataloader(
        split="train", shuffle=True, dataset_cfg=cfg["dataset"], **cfg["dataloader"]
    )
    val_loader = get_dataloader(
        split="val", shuffle=False, dataset_cfg=cfg["dataset"], **cfg["dataloader"]
    )
    status = pipeline.train(model, train_loader, val_loader)

    # # test after training
    # if not status:
    #     test_loader = get_dataloader(
    #         split="val",
    #         shuffle=False,
    #         dataset_cfg=cfg["dataset"],
    #         num_workers=1,
    #         batch_size=1,
    #     )
    #     pipeline.evaluate(model, test_loader, tb_prefix="VAL")


def run_evaluation(model, pipeline, cfg):
    val_loader = get_dataloader(
        split="val",
        shuffle=False,
        dataset_cfg=cfg["dataset"],
        num_workers=1,
        batch_size=1,
    )
    pipeline.evaluate(model, val_loader, tb_prefix="VAL", rm_files=False)

    # test_loader = get_dataloader(
    #     split="test",
    #     shuffle=False,
    #     dataset_cfg=cfg["dataset"],
    #     num_workers=1,
    #     batch_size=1,
    # )
    # pipeline.evaluate(model, test_loader, tb_prefix="TEST", rm_files=False)


if __name__ == "__main__":
    # Run benchmark to select fastest implementation of ops.
    torch.backends.cudnn.benchmark = True

    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument(
        "--cfg", type=str, required=True, help="configuration of the experiment"
    )
    parser.add_argument("--ckpt", type=str, required=False, default=None)
    parser.add_argument("--cont", default=False, action="store_true")
    parser.add_argument("--tmp", default=False, action="store_true")
    parser.add_argument("--bs_one", default=False, action="store_true")
    parser.add_argument("--evaluation", default=False, action="store_true")
    args = parser.parse_args()

    with open(args.cfg, "r") as f:
        cfg = yaml.safe_load(f)
    cfg["pipeline"]["Logger"]["backup_list"].append(args.cfg)
    if args.evaluation:
        cfg["pipeline"]["Logger"]["tag"] += "_EVAL"
    if args.tmp:
        cfg["pipeline"]["Logger"]["tag"] = "TMP_" + cfg["pipeline"]["Logger"]["tag"]
    if args.bs_one:
        cfg["dataloader"]["batch_size"] = 1

    # Copy model-related settings into the dataset config.
    cfg["dataset"]["target_mode"] = cfg["model"]["target_mode"]
    cfg["dataset"]["num_anchors"] = cfg["model"]["kwargs"]["num_anchors"]
    cfg["dataset"]["num_ori_bins"] = cfg["model"]["kwargs"]["num_ori_bins"]
    # nuScenes uses multiple classes and extra input features; other datasets use
    # a single class with xyz input only.
    if cfg["dataset"]["name"] == "nuScenes":
        nc = len(cfg["dataset"]["included_classes"])
        cfg["model"]["kwargs"]["num_classes"] = nc if nc > 0 else 10
        cfg["model"]["nuscenes"] = True
        cfg["model"]["kwargs"]["input_dim"] = 3 + len(
            cfg["dataset"]["additional_features"]
        )
    else:
        cfg["model"]["kwargs"]["num_classes"] = 1
        cfg["model"]["nuscenes"] = False
        cfg["model"]["kwargs"]["input_dim"] = 3

    model = get_model(cfg["model"])
    model.cuda()

    pipeline = Pipeline(model, cfg["pipeline"])

    if args.ckpt:
        pipeline.load_ckpt(model, args.ckpt)
    elif args.cont and pipeline.sigterm_ckpt_exists():
        pipeline.load_sigterm_ckpt(model)

    # training or evaluation
    if not args.evaluation:
        run_training(model, pipeline, cfg)
    else:
        run_evaluation(model, pipeline, cfg)

    pipeline.close()
126 changes: 126 additions & 0 deletions lib/iou3d/iou3d/__init__.py
@@ -0,0 +1,126 @@
import torch
import iou3d_cuda


# From https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/iou3d/iou3d_utils.py


def boxes3d_to_bev_torch(boxes3d):
    """
    :param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
    :return:
        boxes_bev: (N, 5) [x1, y1, x2, y2, ry]
    """
    boxes_bev = boxes3d.new(torch.Size((boxes3d.shape[0], 5)))

    cu, cv = boxes3d[:, 0], boxes3d[:, 2]
    half_l, half_w = boxes3d[:, 5] / 2, boxes3d[:, 4] / 2
    boxes_bev[:, 0], boxes_bev[:, 1] = cu - half_l, cv - half_w
    boxes_bev[:, 2], boxes_bev[:, 3] = cu + half_l, cv + half_w
    boxes_bev[:, 4] = boxes3d[:, 6]
    return boxes_bev


def boxes_iou_bev(boxes_a, boxes_b):
    """
    :param boxes_a: (M, 5)
    :param boxes_b: (N, 5)
    :return:
        ans_iou: (M, N)
    """

    ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()

    iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)

    return ans_iou


def boxes_iou3d_gpu(boxes_a, boxes_b):
    """
    :param boxes_a: (M, 7) [x, y, z, h, w, l, ry]
    :param boxes_b: (N, 7) [x, y, z, h, w, l, ry]
    :return:
        ans_iou: (M, N)
    """
    boxes_a_bev = boxes3d_to_bev_torch(boxes_a)
    boxes_b_bev = boxes3d_to_bev_torch(boxes_b)

    # bev overlap
    overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()  # (N, M)
    iou3d_cuda.boxes_overlap_bev_gpu(boxes_a_bev.contiguous(), boxes_b_bev.contiguous(), overlaps_bev)

    # height overlap
    boxes_a_height_min = (boxes_a[:, 1] - boxes_a[:, 3]).view(-1, 1)
    boxes_a_height_max = boxes_a[:, 1].view(-1, 1)
    boxes_b_height_min = (boxes_b[:, 1] - boxes_b[:, 3]).view(1, -1)
    boxes_b_height_max = boxes_b[:, 1].view(1, -1)

    max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
    min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
    overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)

    # 3d iou
    overlaps_3d = overlaps_bev * overlaps_h

    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)

    iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-7)

    return iou3d


def nms_gpu(boxes, scores, thresh):
    """
    :param boxes: (N, 5) [x1, y1, x2, y2, ry]
    :param scores: (N)
    :param thresh:
    :return:
    """
    # areas = (x2 - x1) * (y2 - y1)
    order = scores.sort(0, descending=True)[1]

    boxes = boxes[order].contiguous()

    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous()


def nms_normal_gpu(boxes, scores, thresh):
    """
    :param boxes: (N, 5) [x1, y1, x2, y2, ry]
    :param scores: (N)
    :param thresh:
    :return:
    """
    # areas = (x2 - x1) * (y2 - y1)
    order = scores.sort(0, descending=True)[1]

    boxes = boxes[order].contiguous()

    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous()


def nms_dist_gpu(boxes, scores, l_ave, w_ave, thresh):
    """
    :param boxes: (N, 5) [x1, y1, x2, y2, ry]
    :param scores: (N)
    :param thresh:
    :return:
    """
    # areas = (x2 - x1) * (y2 - y1)
    order = scores.sort(0, descending=True)[1]

    boxes = boxes[order].contiguous()

    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_cuda.nms_dist_gpu(boxes, keep, l_ave, w_ave, thresh)
    return order[keep[:num_out].cuda()].contiguous()


if __name__ == '__main__':
    pass
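
A usage sketch for the functions above (assumes a CUDA device and the `iou3d_cuda` extension built from `lib/iou3d`; the box values are random placeholders):

```
# Usage sketch for the iou3d helpers (assumes CUDA and the built iou3d_cuda extension).
import torch
from iou3d import boxes3d_to_bev_torch, boxes_iou3d_gpu, nms_gpu

# Random 3D boxes in [x, y, z, h, w, l, ry] format, as expected by boxes_iou3d_gpu.
boxes_a = torch.rand(8, 7).cuda()
boxes_b = torch.rand(6, 7).cuda()

iou = boxes_iou3d_gpu(boxes_a, boxes_b)  # (8, 6) pairwise 3D IoU
print(iou.shape)

# BEV NMS: convert to [x1, y1, x2, y2, ry] and suppress overlaps above 0.5 IoU.
scores = torch.rand(8).cuda()
keep = nms_gpu(boxes3d_to_bev_torch(boxes_a), scores, thresh=0.5)
print(keep)
```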