eval_mot.py (forked from ultralytics/yolov3)
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.

"""Compute metrics for trackers using MOTChallenge ground-truth data."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
from collections import OrderedDict
import glob
import logging
import os
from pathlib import Path

import motmetrics as mm


def parse_args():
    """Defines and parses command-line arguments."""
    parser = argparse.ArgumentParser(
        description="""
Compute metrics for trackers using MOTChallenge ground-truth data.

Files
-----
All file content, ground truth and test files, have to comply with the
format described in

Milan, Anton, et al.
"MOT16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Structure
---------

Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string.""",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--groundtruths',
                        type=str,
                        default="./data/videosample",
                        help='Directory containing ground truth files.')
    parser.add_argument('--tests',
                        type=str,
                        default="./data/videoresult",
                        help='Directory containing tracker result files.')
    parser.add_argument('--loglevel',
                        type=str,
                        help='Log level',
                        default='info')
    parser.add_argument('--fmt', type=str, help='Data format', default='mot16')
    parser.add_argument('--solver',
                        type=str,
                        help='LAP solver to use for matching between frames.')
    parser.add_argument(
        '--id_solver',
        type=str,
        help='LAP solver to use for ID metrics. Defaults to --solver.')
    parser.add_argument('--exclude_id',
                        dest='exclude_id',
                        default=False,
                        action='store_true',
                        help='Disable ID metrics.')
    return parser.parse_args()
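
# For reference, a MOTChallenge/MOT16-format result file is comma-separated, one row per
# detection, roughly:
#   <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
# This is only a sketch of the layout referenced in the help text above; ground-truth files
# carry class/visibility columns instead of world coordinates. See https://motchallenge.net/
# for the authoritative specification.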


def compare_dataframes(gts, ts):
    """Builds an accumulator for each sequence with both ground truth and results."""
    accs = []
    names = []
    for k, tsacc in ts.items():
        if k in gts:
            logging.info('Comparing %s...', k)
            accs.append(
                mm.utils.compare_to_groundtruth(gts[k],
                                                tsacc,
                                                'iou',
                                                distth=0.5))
            names.append(k)
        else:
            logging.warning('No ground truth for %s, skipping.', k)
    return accs, names
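
# Note on the matching above: with 'iou' and distth=0.5, py-motmetrics uses 1 - IoU as the
# pairwise distance and treats distances above 0.5 as no match, so a hypothesis can only be
# assigned to a ground-truth box when their IoU is at least 0.5 (standard MOTChallenge setting).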


if __name__ == '__main__':
    # pylint: disable=missing-function-docstring
    args = parse_args()

    loglevel = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(loglevel, int):
        raise ValueError('Invalid log level: {}'.format(args.loglevel))
    logging.basicConfig(level=loglevel,
                        format='%(asctime)s %(levelname)s - %(message)s',
                        datefmt='%I:%M:%S')

    if args.solver:
        mm.lap.default_solver = args.solver

    # Collect ground-truth files (one gt.txt per sequence directory) and tracker result files,
    # ignoring any previously written 'eval*' outputs in the results directory.
    gtfiles = glob.glob(os.path.join(args.groundtruths, 'cutout*/gt.txt'))
    tsfiles = [
        f for f in glob.glob(os.path.join(args.tests, '*.txt'))
        if not os.path.basename(f).startswith('eval')
    ]

    logging.info('Found %d groundtruths and %d test files.', len(gtfiles),
                 len(tsfiles))
    logging.info('Available LAP solvers %s', str(mm.lap.available_solvers))
    logging.info('Default LAP solver \'%s\'', mm.lap.default_solver)
    logging.info('Loading files.')

    # Sequence names come from the parent directory for ground truth and the file stem for results.
    gt = OrderedDict([(Path(f).parts[-2],
                       mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1))
                      for f in gtfiles])
    ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0],
                       mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles])

    mh = mm.metrics.create()
    accs, names = compare_dataframes(gt, ts)

    metrics = list(mm.metrics.motchallenge_metrics)
    if args.exclude_id:
        metrics = [x for x in metrics if not x.startswith('id')]

    logging.info('Running metrics')

    if args.id_solver:
        mm.lap.default_solver = args.id_solver
    summary = mh.compute_many(accs,
                              names=names,
                              metrics=metrics,
                              generate_overall=True)
    print(
        mm.io.render_summary(summary,
                             formatters=mh.formatters,
                             namemap=mm.io.motchallenge_metric_names))
    logging.info('Completed')
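
# Usage sketch (the directory names are just the script defaults above; adjust to your data):
#   python eval_mot.py --groundtruths ./data/videosample --tests ./data/videoresult --fmt mot16
# which expects ground truth at  ./data/videosample/cutout*/gt.txt
# and tracker results at         ./data/videoresult/<SEQUENCE>.txt
# and prints a MOTChallenge-style summary (MOTA, MOTP, IDF1, ...) per sequence plus an OVERALL row.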