<!DOCTYPE HTML>
<!--
Landed by HTML5 UP
html5up.net | @ajlkn
Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
-->
<html>
<head>
<title>autoPET-III</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
<link rel="stylesheet" href="assets/css/main.css" />
<noscript><link rel="stylesheet" href="assets/css/noscript.css" /></noscript>
<!--<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script> -->
<!--<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script> -->
</head>
<body class="is-preload">
<div id="page-wrapper">
<!-- Header -->
<header id="header">
<h1 id="logo"><a href="index.html">autoPET</a></h1>
<nav id="nav">
<ul>
<li><a href="index.html">Home</a></li>
<li>
<a href="#">Challenges</a>
<ul>
<li><a href="autopeti.html">autoPET-I</a></li>
<li><a href="autopetii.html">autoPET-II</a></li>
<li><a href="autopetiii.html">autoPET-III</a></li>
<li><a href="autopetiv.html">autoPET/CT-IV</a></li>
</ul>
</li>
<li>
<a href="#">Databases</a>
<ul>
<li><a href="fdgpetct.html">FDG PET/CT</a></li>
<li><a href="psmapetct.html">PSMA PET/CT</a></li>
<li><a href="longitudinalct.html">Longitudinal CT</a></li>
</ul>
</li>
<li><a href="organizers.html">Organizers</a></li>
</ul>
</nav>
</header>
<!-- Main -->
<div id="main" class="wrapper style1">
<div class="container">
<header class="major">
<img src="images/autopet3_logo.png" height="100">
<h2>autoPET-III</h2>
<p>The tracer frontier</p>
</header>
<!-- Text -->
<section id="labels" class="wrapper2 style1 special">
<div class="container">
<div class="box alt">
<div class="row gtr-uniform">
<section class="col-4 col-6-medium col-12-xsmall">
<span class="icon solid alt major fa-list" onclick="window.location.href='#Task';"></span>
<a href="#Task"><h3>Task</h3></a>
</section>
<section class="col-4 col-6-medium col-12-xsmall">
<span class="icon solid alt major fa-database" onclick="window.location.href='#Database';"></span>
<a href="#Database"><h3>Database</h3></a>
</section>
<section class="col-4 col-6-medium col-12-xsmall">
<a href="#Evaluation">
<span class="icon solid alt major fa-dna" onclick="window.location.href='#Evaluation';"></span>
<h3>Evaluation</h3></a>
</section>
<section class="col-4 col-6-medium col-12-xsmall">
<a href="#Codes">
<span class="icon solid alt major fa-file" onclick="window.location.href='#Codes';"></span>
<h3>Codes and Models</h3></a>
</section>
<section class="col-4 col-6-medium col-12-xsmall">
<a href="#Leaderboard">
<span class="icon solid alt major fa-trophy" onclick="window.location.href='#Leaderboard';"></span>
<h3>Leaderboard</h3></a>
</section>
<section class="col-4 col-6-medium col-12-xsmall">
<a href="#Organizers">
<span class="icon solid alt major fa-people-arrows" onclick="window.location.href='#Organizers';"></span>
<h3>Organizers</h3></a>
</section>
</div>
</div>
</div>
</section>
<section id="Introduction">
<h3>Introduction</h3>
<p style="text-align:justify">
We invite you to participate in the third autoPET Challenge. The focus of this year's challenge is to further refine the automated segmentation of tumor lesions in Positron Emission Tomography/Computed Tomography (PET/CT) scans in a multitracer multicenter setting.
Over the past decades, PET/CT has emerged as a pivotal tool in oncological diagnostics, management and treatment planning. In clinical routine, medical experts typically rely on a qualitative analysis of the PET/CT images, although quantitative analysis would enable more precise and individualized tumor characterization and therapeutic decisions. A major barrier to clinical adoption is lesion segmentation, a necessary step for quantitative image analysis. Performed manually, it's tedious, time-consuming and costly. Machine Learning offers the potential for fast and fully automated quantitative analysis of PET/CT images, as previously demonstrated in the first two autoPET challenges.
Building upon the insights gained in these challenges, autoPET III expands the scope to address the critical need for models to generalize across multiple tracers and centers. To this end, we provide participants access to a more diverse PET/CT dataset containing images of two different tracers, Prostate-Specific Membrane Antigen (PSMA) and Fluorodeoxyglucose (FDG), acquired at two different clinical sites (Figure). In this challenge, we give participants the chance to let their models compete in two award categories. In award category one, participants are tasked with developing robust segmentation algorithms applicable to two different tracers. In award category two, the significance of data quality and preprocessing in algorithm performance is addressed. Here, participants are encouraged to enhance our baseline model using innovative data pipelines, fostering advancements in data-centric approaches to automated PET/CT lesion segmentation.
Join us in autoPET III to pave the way for robust deep-learning-based medical image analysis in PET/CT, optimizing diagnostics and personalizing therapy guidance in oncology.
</p><p style="text-align:justify">
AutoPET-III is hosted at <a href="http://www.miccai.org/special-interest-groups/challenges/miccai-registered-challenges/" target="_blank">MICCAI 2024</a>: <a href="https://doi.org/10.5281/zenodo.10990932"><img src="https://zenodo.org/badge/DOI/10.5281/zenodo.10990932.svg"></a>
<br/>
and supported by the <a href="https://www.eshi-society.org/">European Society for Hybrid, Molecular and Translational Imaging (ESHI)</a>. The challenge is the successor of <a href="autopeti.html">autoPET</a> and <a href="autopetii.html">autoPET II</a>. </p>
<hr />
</section>
<section id="Grand_challenge_sec">
<h3>Grand Challenge</h3>
<p style="text-align:justify">
More information about the challenge can be found on <a href="https://autopet-iii.grand-challenge.org/">Grand Challenge</a>.
</p>
<hr/>
</section>
<section id="Task_sec">
<a class="anchor" id="Task"></a>
<h3>Task</h3>
<p style="text-align:justify">
Based on the insights of the last two autoPET challenges, we expand the scope of the autoPET III challenge to the primary task of achieving multitracer multicenter generalization of automated lesion segmentation.
To this end, we provide participants access to two different PET/CT training datasets: a large, annotated FDG-PET/CT dataset acquired at the University Hospital Tübingen, Germany (UKT), and a large, annotated PSMA-PET/CT dataset acquired at LMU Hospital, LMU Munich, Germany (LMU). The FDG-UKT dataset was already used in the autoPET I and II challenges. The PSMA-LMU dataset is new. It encompasses 597 PET/CT volumes of male patients diagnosed with prostate carcinoma from LMU Hospital and shows a significant domain shift from the UKT training data (different tracers, different PET/CT scanners, and acquisition protocols). Algorithms will be tested on PSMA and FDG data from both LMU and UKT.
<br/>
In addition, we will have a second award category, where participants are invited to submit our baseline model trained with their advanced data pipelines. This category is motivated by the observation that in autoPET I and II, data quality and handling in pre- and post-processing posed significant bottlenecks. Due to the rarity of publicly available PET data in the medical deep learning community, there is no standardized approach to preprocess these images (normalization, augmentations, etc.). The second award category will thus additionally promote data-centric approaches to automated PET/CT lesion segmentation.
<br/>
Testing will be performed on 200 studies (held-out test database). Test data will be drawn in part (50%) from the same sources and distributions as the training data. The other part will be drawn crosswise from the other center, i.e. PSMA from Tuebingen (25%) and FDG from LMU (25%).
<div class="col-12"><span class="image fit"><img src="images/autopet3_dataset.png"> </span></div>
<br/>
I.) Accurate segmentation of FDG- and PSMA-avid tumor lesions in whole-body PET/CT images. The specific challenge in automated segmentation of lesions in PET/CT is to avoid false-positive segmentation of anatomical structures that have physiologically high uptake while capturing all tumor lesions. This task is particularly challenging in a multitracer setting since the physiological uptake partly differs for different tracers: e.g. brain, kidney, heart for FDG versus liver, kidney, spleen, and submandibular glands for PSMA.<br/>
II.) Robust behavior of the to-be-developed algorithms with respect to moderate changes in the choice of tracer, acquisition protocol, or acquisition site. This will be reflected by the test data which will be drawn partly from the same distribution as the training data and partly from a different hospital with a similar, but slightly different acquisition setup.<br/>
<br/>
We encourage two submission tracks:<br/>
1.) Best generalizing model: Best ranked model wins! The rules are simple: Train a model which generalizes well on FDG and PSMA data. Or train two models and combine them? You are free to choose. You can use additional data which is publicly available. <br/>
2.) Datacentrism: Best data-handling wins! In real-world applications, especially in the medical domain, data is messy. Improving models is not the only way to get better performance. You can also improve the dataset itself rather than treating it as fixed. This is the core idea of a popular research direction called Data-Centric AI (DCAI). Examples are outlier detection and removal (handling abnormal examples in the dataset), error detection and correction (handling incorrect values/labels in the dataset), data augmentation (adding examples to the data to encode prior knowledge) and many more. If you are interested, a good resource to start with is <a href="https://dcai.csail.mit.edu/">DCAI</a>. The rules are: Train a model which generalizes well on FDG and PSMA data but DO NOT alter the model architecture or get lost in configuration ablations. For that, we will provide a second baseline <a href="https://github.com/ClinicalDataScience/autoPETIII/tree/main/datacentric-baseline">container</a> and a <a href="https://github.com/ClinicalDataScience/datacentric-challenge/tree/main">tutorial</a> on how to use and train the model. You are not allowed to use any additional data, and the datacentric baseline model will be in the competition. A minimal illustrative data-screening sketch is shown after this list.<br/>
</p>
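<p style="text-align:justify">
In the spirit of the datacentric track, the following is a minimal, purely illustrative sketch (not part of the challenge code) of one possible data-screening step: it flags training cases whose SUV statistics or lesion masks look suspicious before training. The paths and threshold are hypothetical.
</p>
<pre><code>import nibabel as nib

def screen_case(suv_path, seg_path, suv_max_limit=100.0):
    """Return a list of potential data issues for one training case."""
    suv = nib.load(suv_path).get_fdata()
    seg = nib.load(seg_path).get_fdata()
    issues = []
    if suv.max() > suv_max_limit:
        issues.append("suspicious SUV maximum: %.1f" % suv.max())
    if seg.sum() == 0:
        issues.append("empty lesion mask (negative case or missing label?)")
    return issues
</code></pre>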
<hr/>
</section>
<section id="Database_sec">
<a class="anchor" id="Database"></a>
<h3>Database</h3>
<p style="text-align:justify">
The challenge cohort consists of patients with histologically proven malignant melanoma, lymphoma or lung cancer as well as negative control patients who were examined by FDG-PET/CT, and of patients with prostate carcinoma who were examined by PSMA-PET/CT, in two large medical centers (University Hospital Tübingen, Germany & University Hospital of the LMU in Munich, Germany).
<br/>
The FDG cohort comprises 501 patients diagnosed with histologically proven malignant melanoma, lymphoma, or lung cancer, along with 513 negative control patients. The PSMA cohort includes pre- and/or post-therapeutic PET/CT images of male individuals with prostate carcinoma, encompassing images with (537) and without PSMA-avid tumor lesions (60). Notably, the training datasets exhibit distinct age distributions: the FDG UKT cohort spans 570 male patients (mean age: 60; std: 16) and 444 female patients (mean age: 58; std: 16), whereas the PSMA LMU cohort tends to be older, with 378 male patients (mean age: 71; std: 8). Additionally, there are variations in imaging conditions between the FDG Tübingen and PSMA Munich cohorts, particularly regarding the types and number of PET/CT scanners utilized for acquisition. The PSMA Munich dataset was acquired using three different scanner types (Siemens Biograph 64-4R TruePoint, Siemens Biograph mCT Flow 20, and GE Discovery 690), whereas the FDG Tübingen dataset was acquired using a single scanner (Siemens Biograph mCT).
</p>
<h4>PET/CT acquisition protocol</h4>
<p style="text-align:justify">
FDG dataset: Patients fasted at least 6 h prior to the injection of approximately 350 MBq 18F-FDG. Whole-body PET/CT images were acquired using a Biograph mCT PET/CT scanner (Siemens Healthcare GmbH, Erlangen, Germany) and were initiated approximately 60 min after intravenous tracer administration. Diagnostic CT scans of the neck, thorax, abdomen, and pelvis (200 reference mAs; 120 kV) were acquired 90 sec after intravenous injection of a contrast agent (90-120 ml Ultravist 370, Bayer AG) or without contrast agent (in case of existing contraindications). PET images were reconstructed iteratively (three iterations, 21 subsets) with Gaussian post-reconstruction smoothing (2 mm full width at half-maximum). Slice thickness on contrast-enhanced CT was 2 or 3 mm.
<br/>
PSMA dataset: Examinations were acquired on different PET/CT scanners (Siemens Biograph 64-4R TruePoint, Siemens Biograph mCT Flow 20, and GE Discovery 690). The imaging protocol mainly consisted of a diagnostic CT scan from the skull base to the mid-thigh using the following scan parameters: reference tube current exposure time product of 143 mAs (mean); tube voltage of 100 kV or 120 kV for most cases; slice thickness of 3 mm for Biograph 64 and Biograph mCT, and 2.5 mm for GE Discovery 690 (except for 3 cases with 5 mm). Intravenous contrast enhancement was used in most studies (571), except for patients with contraindications (26).
<br/>
The whole-body PSMA-PET scan was acquired on average around 74 minutes after intravenous injection of 246 MBq 18F-PSMA (mean, 369 studies) or 214 MBq 68Ga-PSMA (mean, 228 studies). The PET data was reconstructed with attenuation correction derived from corresponding CT data. For the GE Discovery 690, the reconstruction process employed a VPFX algorithm with voxel size 2.73 mm × 2.73 mm × 3.27 mm; for the Siemens Biograph mCT Flow 20, a PSF+TOF algorithm (2 iterations, 21 subsets) with voxel size 4.07 mm × 4.07 mm × 3.00 mm; and for the Siemens Biograph 64-4R TruePoint, a PSF algorithm (3 iterations, 21 subsets) with voxel size 4.07 mm × 4.07 mm × 5.00 mm.
</p>
<h4>Training and test cohort</h4>
<p style="text-align:justify">
Training cases: 1,014 FDG studies (900 patients) and 597 PSMA studies (378 patients)<br/>
Test cases (final evaluation): 200 studies (50 FDG LMU, 50 FDG UKT, 50 PSMA LMU, 50 PSMA UKT)<br/>
Test cases (preliminary evaluation): 5 studies<br/>
A case (training or test case) consists of one 3D whole-body PET volume (FDG or PSMA), one corresponding 3D whole-body CT volume and one 3D binary mask of manually segmented tumor lesions on PET of the size of the PET volume. CT and PET were acquired simultaneously on a single PET/CT scanner in one session; thus PET and CT are anatomically aligned up to minor shifts due to physiological motion.
</p>
<h4>Training set</h4>
<p style="text-align:justify">
Training data consists of 1,014 <a href="fdgpetct.html">FDG PET/CT</a> studies and 597 <a href="psmapetct.html">PSMA PET/CT</a> studies. We provide the merged data in <a href="https://github.com/MIC-DKFZ/nnUNet">nnUNet</a> format. The download will contain the resampled FDG and PSMA data as NiFTI files. It also contains the files obtained by running the nnUNet fingerprint extractor and a splits file which we use to design/train our baselines.
<br/>NiFTI:
</p>
<pre><code>
|--- imagesTr
|--- tracer_patient1_study1_0000.nii.gz (CT image resampled to PET)
|--- tracer_patient1_study1_0001.nii.gz (PET image in SUV)
|--- ...
|--- labelsTr
|--- tracer_patient1_study1.nii.gz (manual annotations of tumor lesions)
|--- dataset.json (nnUNet dataset description)
|--- dataset_fingerprint.json (nnUNet dataset fingerprint)
|--- splits_final.json (reference 5fold split)
|--- psma_metadata.csv (metadata csv for psma)
|--- fdg_metadata.csv (original metadata csv for fdg)
</code></pre>
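<p style="text-align:justify">
A minimal loading sketch for this download, assuming the layout above (the dataset root and case identifier are hypothetical; splits_final.json is assumed to follow the usual nnUNet list-of-folds format):
</p>
<pre><code>import json
import os
import nibabel as nib

root = "/path/to/nnunet_dataset"               # hypothetical download location
with open(os.path.join(root, "splits_final.json")) as f:
    splits = json.load(f)                      # reference 5-fold split
case = splits[0]["train"][0]                   # e.g. "fdg_patient1_study1"
ct = nib.load(os.path.join(root, "imagesTr", case + "_0000.nii.gz"))   # CT resampled to PET
pet = nib.load(os.path.join(root, "imagesTr", case + "_0001.nii.gz"))  # PET in SUV
seg = nib.load(os.path.join(root, "labelsTr", case + ".nii.gz"))       # lesion annotations
</code></pre>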
<h5>FDG PET/CT</h5>
<p style="text-align:justify">
FDG PET/CT training data contains 1,014 studies acquired at the University Hospital Tübingen and is made publicly available on <a href="https://www.cancerimagingarchive.net/">TCIA</a> (as DICOM, NiFTI and HDF5 files). After download, you can convert the DICOM files to e.g. the NIfTI format using scripts provided <a href="https://github.com/lab-midas/TCIA_processing">here</a>.
</p>
DICOM: <a href="https://doi.org/10.7937/gkr0-xv29"><img src="https://img.shields.io/badge/DOI-10.7937%2Fgkr0--xv29-blue"></a>
<br/>
NiFTI:
<br/>
If you use this data, please cite:
<blockquote>Gatidis S, Kuestner T. A whole-body FDG-PET/CT dataset with manually annotated tumor lesions (FDG-PET-CT-Lesions) [Dataset]. The Cancer Imaging Archive, 2022. DOI: 10.7937/gkr0-xv29
</blockquote>
<h5>PSMA PET/CT</h5>
<p style="text-align:justify">
PSMA PET/CT training data contains 597 studies acquired at the University Hospital Munich and is made publicly available on <a href="https://www.cancerimagingarchive.net/">TCIA</a>. After download, you can convert the DICOM files to e.g. the NIfTI format using scripts provided <a href="https://github.com/lab-midas/TCIA_processing">here</a>.
</p>
DICOM: <a href="https://doi.org/10.7937/r7ep-3x37"><img src="https://img.shields.io/badge/DOI-10.7937%2Fr7ep--3x37-blue"></a>
<br/>
NiFTI:
<br/>
If you use this data, please cite:
<blockquote>Jeblick, K., et al. A whole-body PSMA-PET/CT dataset with manually annotated tumor lesions (PSMA-PET-CT-Lesions) (Version 1) [Dataset]. The Cancer Imaging Archive, 2024. DOI: 10.7937/r7ep-3x37
</blockquote>
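<p style="text-align:justify">
Both datasets are distributed as DICOM on TCIA; the TCIA_processing scripts linked above are the reference conversion route. As an alternative, a generic conversion sketch using the dicom2nifti package could look as follows (paths are hypothetical):
</p>
<pre><code>import dicom2nifti

# Convert every DICOM series found below the input directory to compressed NIfTI.
dicom2nifti.convert_directory("/path/to/dicom_study", "/path/to/nifti_out",
                              compression=True, reorient=True)
</code></pre>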
<h4>Preliminary test set</h4>
<p style="text-align:justify">
For the self-evaluation of participating pipelines, we provide access to a preliminary test set. The preliminary test set uses the same imaging data as the final test set, but consists of 5 studies only.
<br/>
Access to this preliminary set is restricted: it is only possible through the docker containers submitted to the challenge, and only for a limited time during the competition. Its purpose is to let participants check the sanity of their approaches.
</p>
<h4>Final test set</h4>
<p style="text-align:justify">
The final test set consists of 200 studies.
</p>
<h4>Data pre-processing and structure</h4>
<p style="text-align:justify">
In a pre-processing step, the TCIA DICOM files are resampled (CT to PET imaging resolution, i.e. same matrix size) and normalized (PET converted to standardized uptake values; SUV).
For the challenge, the pre-processed data will be provided in NifTI format. PET data is standardized by converting image units from activity counts to standardized uptake values (SUV). We recommend using the resampled CT (CTres.nii.gz) and the PET in SUV (SUV.nii.gz). The mask (SEG.nii.gz) is binary, with 1 indicating a lesion. The training and test databases have the following structure:
<pre><code>
|--- Patient 1
|--- Study 1
|--- SUV.nii.gz (PET image in SUV)
|--- CTres.nii.gz (CT image resampled to PET)
|--- CT.nii.gz (Original CT image)
|--- SEG.nii.gz (Manual annotations of tumor lesions)
|--- PET.nii.gz (Original PET image as activity counts)
|--- Study 2 (Potential 2nd visit of same patient)
|--- ...
|--- Patient 2
|--- ...
</code></pre>
Each NiFTI file contains the respective image or the mask.
An example case can be loaded as:
<pre><code>import os
import nibabel as nib

# data_root_path points to the root of the extracted dataset
SUV = nib.load(os.path.join(data_root_path, 'PETCT_0af7ffe12a', '08-12-2005-NA-PET-CT Ganzkoerper primaer mit KM-96698', 'SUV.nii.gz'))
</code></pre>
where <code>PETCT_0af7ffe12a</code> is the fully anonymized patient and <code>08-12-2005-NA-PET-CT Ganzkoerper primaer mit KM-96698</code> is the anonymized study (randomly generated study name; the date does not reflect the scan date).
</p>
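<p style="text-align:justify">
For orientation, here is a minimal sketch of the body-weight SUV normalization mentioned above, assuming the PET volume is stored as activity concentration in Bq/ml and that the decay-corrected injected dose and patient weight are known (e.g. from the DICOM header). The actual preprocessing was done by the provided scripts; this is only illustrative:
</p>
<pre><code>import nibabel as nib
import numpy as np

def pet_to_suv(pet_path, injected_dose_bq, body_weight_kg, out_path):
    """Convert a PET volume from Bq/ml to body-weight SUV."""
    pet = nib.load(pet_path)
    activity = pet.get_fdata()   # activity concentration in Bq/ml
    # SUV = concentration / (injected dose / body weight in g), assuming 1 g/ml tissue density
    suv = activity * (body_weight_kg * 1000.0) / injected_dose_bq
    nib.save(nib.Nifti1Image(suv.astype(np.float32), pet.affine), out_path)
</code></pre>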
<h4>Annotation</h4>
<p style="text-align:justify">
FDG PET/CT training and test data from UKT was annotated by a radiologist with 10 years of experience in hybrid imaging and experience in machine learning research. FDG PET/CT test data from LMU was annotated by a radiologist with 8 years of experience in hybrid imaging. PSMA PET/CT training and test data from LMU as well as PSMA PET/CT test data from UKT was annotated by a single reader and reviewed by a radiologist with 5 years of experience in hybrid imaging.<br/>
The following annotation protocol was defined:<br/>
Step 1: Identification of FDG-avid tumor lesions by visual assessment of PET and CT information together with the clinical examination reports.<br/>
Step 2: Manual free-hand segmentation of identified lesions in axial slices.
</p>
<hr/>
</section>
<section id="Evaluation_sec">
<a class="anchor" id="Evaluation"></a>
<h3>Evaluation</h3>
<p style="text-align:justify">
Evaluation will be performed on held-out test cases of 200 patients. Test data will be drawn in part (50%) from the same sources and distributions as the training data, i.e. 50 PSMA-PET/CT scans from LMU, 50 FDG-PET/CT scans from UKT. The other part will be drawn crosswise from the other center, i.e. 50 PSMA-PET/CT scans from UKT and 50 FDG-PET/CT scans from LMU.
A combination of three metrics will be used, reflecting the aims and specific challenges of the task of PET lesion segmentation:
<ol>
<li>Foreground Dice score of segmented lesions</li>
<li>Volume of false positive connected components that do not overlap with positives (=false positive volume)</li>
<li>Volume of positive connected components in the ground truth that do not overlap with the estimated segmentation mask (=false negative volume)</li>
</ol>
In case of test data that do not contain positives (no FDG-avid or PSMA-avid lesions), only metric 2 will be used. <br/>
A python script computing these evaluation metrics is provided under <a href="https://github.com/lab-midas/autoPET">https://github.com/lab-midas/autoPET</a>.
<br/>
<img src="images/metricautopet.png" width="50%"><br/>
<i>Figure: Example for the evaluation. The Dice score is calculated to measure the correct overlap between predicted lesion segmentation (blue) and ground truth (red). Additionally, special emphasis is put on false positives by measuring their volume (large false positives like brain or bladder will result in a low score) and on false negatives by measuring their volume (i.e. entirely missed lesions).</i>
<br/>
</p>
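<p style="text-align:justify">
For illustration, a minimal sketch of the three metrics based on connected-component analysis (the linked repository contains the official implementation; masks are assumed to be binary numpy arrays):
</p>
<pre><code>import numpy as np
from scipy import ndimage

def dice_score(gt, pred):
    """Foreground Dice between two binary masks."""
    denom = gt.sum() + pred.sum()
    return 2.0 * np.logical_and(gt, pred).sum() / denom if denom > 0 else 1.0

def false_positive_volume(gt, pred, voxel_vol_ml):
    """Volume of predicted connected components with no overlap with the ground truth."""
    labeled, n = ndimage.label(pred)
    voxels = sum((labeled == i).sum() for i in range(1, n + 1)
                 if not np.logical_and(labeled == i, gt).any())
    return voxels * voxel_vol_ml

def false_negative_volume(gt, pred, voxel_vol_ml):
    """Volume of ground-truth connected components entirely missed by the prediction."""
    labeled, n = ndimage.label(gt)
    voxels = sum((labeled == i).sum() for i in range(1, n + 1)
                 if not np.logical_and(labeled == i, pred).any())
    return voxels * voxel_vol_ml
</code></pre>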
<h4>Ranking</h4>
<p style="text-align:justify">
The submitted algorithms will be ranked according to:<br>
Step 1: Divide the test dataset into subsets based on center and tracer (i.e., PSMA LMU, PSMA UKT, FDG LMU, FDG UKT) and calculate the average metrics for Dice, FPV, and FNV within each subset.
<br>
Step 2: Rank the subset averages across all algorithms. For each metric, an intermediate average rank is computed by averaging the ranks of the subsets.
<br>
Step 3: Generate the overall rank by combining the three metric ranks using a weighting factor: Dice (0.5), FPV (0.25), and FNV (0.25). In case of equal ranking, the achieved Dice metric will be used as a tie break.
</p>
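<p style="text-align:justify">
A compact sketch of this ranking scheme (illustrative only; the nested layout metrics[algorithm][metric][subset] holding the subset averages is a hypothetical data structure, and ties are not handled):
</p>
<pre><code>import numpy as np

def rank(values, higher_is_better):
    """1-based ranks across algorithms; the best value gets rank 1."""
    order = np.argsort(-values if higher_is_better else values)
    ranks = np.empty(len(values))
    ranks[order] = np.arange(1, len(values) + 1)
    return ranks

def combined_ranks(metrics, algos, subsets):
    weights = {"dice": 0.5, "fpv": 0.25, "fnv": 0.25}
    total = np.zeros(len(algos))
    for metric, w in weights.items():
        # Step 2: rank the subset averages across algorithms, then average over subsets.
        per_subset = [rank(np.array([metrics[a][metric][s] for a in algos]),
                           higher_is_better=(metric == "dice"))
                      for s in subsets]
        # Step 3: combine the three metric ranks with the weighting factors.
        total += w * np.mean(per_subset, axis=0)
    return dict(zip(algos, total))  # lower combined value = better overall rank
</code></pre>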
<h5>Award category 1: Modelcentric</h5>
<p style="text-align:justify">
Best ranked model wins!
(for Dice: higher score = better, for FP or FN: lower volumes = better)
</p>
<h5>Award category 2: Datacentrism</h5>
<p style="text-align:justify">
Best data-handling wins!
</p>
<hr/>
</section>
<section id="Codes_sec">
<a class="anchor" id="Codes"></a>
<h3>Codes and Models</h3>
<h4>Codes</h4>
<p style="text-align:justify">
<a href="https://github.com/ClinicalDataScience/autoPETIII" class="icon brands alt fa-github">https://github.com/ClinicalDataScience/autoPETIII</a>
</p>
<h4>Models</h4>
<p style="text-align:justify">
Models and documentation of the submitted challenge algorithms can be found in the <a href="#Leaderboard">Leaderboard</a>.
</p>
<hr/>
</section>
<section id="Leaderboard_sec">
<a class="anchor" id="Leaderboard"></a>
<h3>Leaderboard</h3>
<!--
<script>
fetch('./statistics/autopet1_participants.json')
.then(response => response.json())
.then(spec => {
vegaEmbed('#vis', spec).then(function(result) {
// Visualization rendered
}).catch(console.error);
});
</script>
<div id="vis"></div>
-->
<h4>Category 1: Modelcentric</h4>
<p style="text-align:justify">
<div class="table-wrapper">
<table>
<thead>
<tr>
<th>#</th>
<th>Team</th>
<th>Mean Position</th>
<th>Dice (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>False Negative Volume (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>False Positive Volume (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>Model</th>
<th>Documentation</th>
</tr>
<tr>
<th></th>
<th></th>
<th></th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>LesionTracer</td>
<td>6.0625</td>
<td>0.6619 (3)</td>
<td>0.6433 (1)</td>
<td>0.7702 (7)</td>
<td>0.5711 (8)</td>
<td>4.75</td>
<td>1.7810 (1)</td>
<td>10.1590 (8)</td>
<td>0.4310 (1)</td>
<td>0.3658 (1)</td>
<td>2.75</td>
<td>1.7030 (1)</td>
<td>1.1123 (9)</td>
<td>3.0189 (23)</td>
<td>5.3053 (11)</td>
<td>12</td>
<td><a href="https://github.com/MIC-DKFZ/autopet-3-submission" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.09478" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>2</td>
<td>IKIM</td>
<td>8.4062</td>
<td>0.6043 (16)</td>
<td>0.5828 (7)</td>
<td>0.7938 (3)</td>
<td>0.6176 (4)</td>
<td>7.5</td>
<td>2.7249 (3)</td>
<td>8.4799 (2)</td>
<td>1.0422 (8)</td>
<td>0.7334 (7)</td>
<td>5.125</td>
<td>4.9373 (17)</td>
<td>1.4709 (14)</td>
<td>2.6586 (19)</td>
<td>3.1085 (4)</td>
<td>13.5</td>
<td><a href="https://github.com/hakal104/autoPETIII" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/html/2409.12155v1" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>3</td>
<td>HussainAlasmawi</td>
<td>8.4375</td>
<td>0.6370 (6)</td>
<td>0.5725 (10)</td>
<td>0.7802 (5)</td>
<td>0.5854 (7)</td>
<td>7.0</td>
<td>2.4542 (2)</td>
<td>10.1997 (9)</td>
<td>0.6365 (2)</td>
<td>0.6265 (4)</td>
<td>4.25</td>
<td>3.3616 (13)</td>
<td>1.7221 (18)</td>
<td>2.8756 (22)</td>
<td>5.2173 (9)</td>
<td>15.5</td>
<td><a href="https://github.com/HussainAlasmawi/AutoPet_Final" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://github.com/HussainAlasmawi/AutoPet_Final/blob/main/paper%20summary.pdf" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>4</td>
<td>StockholmTrio</td>
<td>9.9375</td>
<td>0.5771 (21)</td>
<td>0.6010 (4)</td>
<td>0.7684 (8)</td>
<td>0.6326 (3)</td>
<td>9.0</td>
<td>8.4921 (11)</td>
<td>11.1818 (19)</td>
<td>1.1839 (12)</td>
<td>0.8581 (11)</td>
<td>13.25</td>
<td>1.9526 (9)</td>
<td>0.8850 (6)</td>
<td>1.7990 (12)</td>
<td>3.8848 (7)</td>
<td>8.5</td>
<td><a href="https://github.com/Astarakee/AutoPET24" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.14475" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>5</td>
<td>UIH_CRI_SIL</td>
<td>10.0625</td>
<td>0.6194 (14)</td>
<td>0.5937 (5)</td>
<td>0.7922 (4)</td>
<td>0.4111 (18)</td>
<td>10.25</td>
<td>4.6745 (6)</td>
<td>7.5758 (1)</td>
<td>0.9560 (7)</td>
<td>0.4422 (2)</td>
<td>4.0000</td>
<td>1.9500 (8)</td>
<td>2.0641 (21)</td>
<td>2.2770 (15)</td>
<td>12.4480 (19)</td>
<td>15.75</td>
<td><a href="https://github.com/jiayiliu-pku/AP2024" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://www.arxiv.org/abs/2409.09784" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>6</td>
<td>AiraMatrix</td>
<td>11.0625</td>
<td>0.6253 (11)</td>
<td>0.5273 (20)</td>
<td>0.7556 (10)</td>
<td>0.6600 (2)</td>
<td>10.75</td>
<td>12.5127 (17)</td>
<td>15.6801 (28)</td>
<td>1.6105 (16)</td>
<td>1.0412 (16)</td>
<td>19.25</td>
<td>1.8776 (6)</td>
<td>0.4382 (1)</td>
<td>0.8604 (5)</td>
<td>1.7253 (2)</td>
<td>3.5</td>
<td><a href="https://github.com/tanya-chutani-aira/autopetiii" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.13779" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>7</td>
<td>Lennonlychan</td>
<td>11.625</td>
<td>0.6286 (9)</td>
<td>0.5302 (18)</td>
<td>0.7454 (13)</td>
<td>0.4775 (10)</td>
<td>12.5</td>
<td>12.5980 (18)</td>
<td>10.5936 (11)</td>
<td>1.1815 (11)</td>
<td>0.8377 (10)</td>
<td>12.5</td>
<td>1.8980 (7)</td>
<td>1.0702 (8)</td>
<td>1.2379 (7)</td>
<td>5.9060 (14)</td>
<td>9.0</td>
<td><a href="https://github.com/john-lennon-chan/synthPET_inference" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.08068" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>8</td>
<td>QuantIF</td>
<td>11.6875</td>
<td>0.5800 (20)</td>
<td>0.5470 (13)</td>
<td>0.7649 (9)</td>
<td>0.6024 (6)</td>
<td>12.0</td>
<td>6.7282 (8)</td>
<td>11.7130 (20)</td>
<td>2.2904 (20)</td>
<td>0.8140 (8)</td>
<td>14.0</td>
<td>10.0465 (23)</td>
<td>0.6253 (3)</td>
<td>0.7074 (1)</td>
<td>4.2608 (8)</td>
<td>8.75</td>
<td><a href="https://github.com/Zhack47/AutopetIII_CHB/" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2410.02807" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>9</td>
<td>Shadab</td>
<td>12.0625</td>
<td>0.6159 (15)</td>
<td>0.5794 (9)</td>
<td>0.7270 (16)</td>
<td>0.4540 (13)</td>
<td>13.25</td>
<td>11.4725 (15)</td>
<td>9.9943 (7)</td>
<td>0.7998 (6)</td>
<td>0.6623 (5)</td>
<td>8.25</td>
<td>2.1096 (10)</td>
<td>1.4117 (13)</td>
<td>1.9246 (14)</td>
<td>7.0941 (17)</td>
<td>13.5</td>
<td><a href="https://github.com/ahxmeds/autosegnet2024" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://www.arxiv.org/abs/2409.10151" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>10</td>
<td>Zero_sugar</td>
<td>13.25</td>
<td>0.6580 (4)</td>
<td>0.5446 (14)</td>
<td>0.7373 (15)</td>
<td>0.4271 (17)</td>
<td>12.5</td>
<td>12.3233 (16)</td>
<td>11.0423 (17)</td>
<td>4.3788 (25)</td>
<td>1.6138 (22)</td>
<td>20.0000</td>
<td>0.5802 (2)</td>
<td>1.3717 (11)</td>
<td>0.8209 (4)</td>
<td>6.3086 (15)</td>
<td>8.0</td>
<td><a href="https://github.com/alexanderjaus/autopet3_datadiet" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://www.arxiv.org/abs/2409.13548" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>11</td>
<td>BAMF</td>
<td>13.3125</td>
<td>0.6666 (1)</td>
<td>0.5299 (19)</td>
<td>0.6973 (22)</td>
<td>0.4672 (11)</td>
<td>13.25</td>
<td>11.3668 (14)</td>
<td>14.7570 (26)</td>
<td>1.4420 (15)</td>
<td>2.3205 (26)</td>
<td>20.25</td>
<td>3.1945 (11)</td>
<td>0.6220 (2)</td>
<td>1.7017 (10)</td>
<td>2.2677 (3)</td>
<td>6.5</td>
<td><a href="https://github.com/bamf-health/autopet2024" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://github.com/bamf-health/autopet2024/blob/main/Multitracer%20Lesion%20Segmentation%20in%20FDG-18%20and%20PSMA%20Whole-Body%20PET-CT%20scans%20using%20augumented%20labelling%20approach-%20AutoPET%20III%20challenge.docx" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td></td>
<td>datacentric baseline</td>
<td>15.3125</td>
<td>0.6362 (7)</td>
<td>0.5252 (22)</td>
<td>0.7254 (18)</td>
<td>0.4397 (15)</td>
<td>15.5</td>
<td>15.6634 (21)</td>
<td>11.0583 (18)</td>
<td>5.0262 (26)</td>
<td>1.0951 (17)</td>
<td>20.5</td>
<td>0.3659 (1)</td>
<td>1.9186 (19)</td>
<td>0.9542 (6)</td>
<td>5.7609 (13)</td>
<td>9.75</td>
<td><a href="https://github.com/ClinicalDataScience/autoPETIII/tree/main/datacentric-baseline" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td></td>
</tr>
<tr>
<td>12</td>
<td>DING1122</td>
<td>18.75</td>
<td>0.6281 (10)</td>
<td>0.5390 (15)</td>
<td>0.7267 (17)</td>
<td>0.1062 (27)</td>
<td>17.25</td>
<td>14.0621 (19)</td>
<td>10.8470 (15)</td>
<td>1.3436 (14)</td>
<td>2.0637 (23)</td>
<td>17.75</td>
<td>11.2634 (25)</td>
<td>2.5160 (23)</td>
<td>2.3319 (16)</td>
<td>127.1480 (27)</td>
<td>22.75</td>
<td><a href="https://github.com/cwwang1979/CW-nnU-Net-for-PET-CT" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.07144" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>13</td>
<td>WukongRT</td>
<td>18.875</td>
<td>0.6204 (13)</td>
<td>0.5206 (23)</td>
<td>0.7221 (19)</td>
<td>0.3386 (22)</td>
<td>19.25</td>
<td>9.3683 (12)</td>
<td>11.7982 (21)</td>
<td>1.1040 (10)</td>
<td>1.0006 (15)</td>
<td>14.5</td>
<td>6.7212 (20)</td>
<td>3.3785 (25)</td>
<td>3.5422 (24)</td>
<td>18.7775 (21)</td>
<td>22.5</td>
<td><a href="https://github.com/BBQtime/Sine-Wave-Normalization-for-Deep-Learning-Based-Tumor-Segmentation-in-CT-PET/tree/main" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.13410" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>14</td>
<td>HKURad</td>
<td>19.375</td>
<td>0.6026 (17)</td>
<td>0.4955 (25)</td>
<td>0.6973 (21)</td>
<td>0.3496 (21)</td>
<td>21.0</td>
<td>10.8671 (13)</td>
<td>12.9096 (24)</td>
<td>3.3258 (21)</td>
<td>1.1034 (18)</td>
<td>19.0</td>
<td>4.8969 (16)</td>
<td>1.3913 (12)</td>
<td>2.6552 (18)</td>
<td>13.1626 (20)</td>
<td>16.5</td>
<td><a href="https://github.com/Reza-Safdari/AutoPET_III" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.13006" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>15</td>
<td>max.sh</td>
<td>19.5312</td>
<td>0.4012 (26)</td>
<td>0.5811 (8)</td>
<td>0.7483 (12)</td>
<td>0.0285 (29)</td>
<td>18.5</td>
<td>30.2820 (27)</td>
<td>9.2688 (3)</td>
<td>0.6570 (4)</td>
<td>2.1117 (25)</td>
<td>14.5</td>
<td>924.3099 (28)</td>
<td>3.0797 (24)</td>
<td>3.8532 (26)</td>
<td>924.3099 (29)</td>
<td>26.625</td>
<td><a href="https://github.com/maxshatskiy/autopet3_inf" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.0068" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>16</td>
<td>Shrajanbhandary</td>
<td>21.75</td>
<td>0.2669 (29)</td>
<td>0.6125 (3)</td>
<td>0.1875 (29)</td>
<td>0.3597 (20)</td>
<td>20.2500</td>
<td>32.2533 (28)</td>
<td>9.4384 (4)</td>
<td>54.5663 (29)</td>
<td>2.3478 (27)</td>
<td>22.0</td>
<td>5.4561 (18)</td>
<td>8429 (29)</td>
<td>13538 (29)</td>
<td>24.7666 (22)</td>
<td>24.5</td>
<td><a href="https://github.com/Shrajan/fcn_pet_segmentation" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://autopet-iii.grand-challenge.org/results/grandchallenge" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td></td>
<td>nnunet baseline</td>
<td>24.0</td>
<td>0.5017 (24)</td>
<td>0.4741 (27)</td>
<td>0.6822 (25)</td>
<td>0.1512 (26)</td>
<td>25.5</td>
<td>14.0836 (20)</td>
<td>12.5885 (23)</td>
<td>1.6795 (17)</td>
<td>0.9905 (14)</td>
<td>18.5000</td>
<td>11.7733 (26)</td>
<td>4.9485 (27)</td>
<td>6.6433 (27)</td>
<td>102.6461 (26)</td>
<td>26.5</td>
<td><a href="https://github.com/ClinicalDataScience/autoPETIII/tree/main/nnunet-baseline" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td></td>
</tr>
<tr>
<td>17</td>
<td>TUM_ibbm</td>
<td>26.25</td>
<td>0.2845 (28)</td>
<td>0.2031 (29)</td>
<td>0.3639 (28)</td>
<td>0.1891 (25)</td>
<td>27.5</td>
<td>3.6257 (5)</td>
<td>28.4085 (29)</td>
<td>9.4993 (27)</td>
<td>3.4622 (29)</td>
<td>22.5000</td>
<td>231.9891 (29)</td>
<td>216.4435 (28)</td>
<td>250.7309 (28)</td>
<td>89.9235 (25)</td>
<td>27.5</td>
<td><a href="https://github.com/hongweilibran/autopet_bran" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
</tbody>
</table>
</div>
</p>
<h4>Category 2: Datacentric</h4>
<p style="text-align:justify">
<div class="table-wrapper">
<table>
<thead>
<tr>
<th>#</th>
<th>Team</th>
<th>Mean Position</th>
<th>Dice (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>False Negative Volume (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>False Positive Volume (Position)</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th>Model</th>
<th>Documentation</th>
</tr>
<tr>
<th></th>
<th></th>
<th></th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th>LMU FDG</th>
<th>LMU PSMA</th>
<th>UKT FDG</th>
<th>UKT PSMA</th>
<th>Overall rank</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>Lennonlychan</td>
<td>2.5625</td>
<td>0.6286 (5)</td>
<td>0.5302 (3)</td>
<td>0.7454 (1)</td>
<td>0.4775 (1)</td>
<td>2.5</td>
<td>12.5980 (2)</td>
<td>10.5936 (2)</td>
<td>1.1815 (1)</td>
<td>0.8377 (2)</td>
<td>1.75</td>
<td>1.8980 (5)</td>
<td>1.0702 (1)</td>
<td>1.2379 (4)</td>
<td>5.9060 (4)</td>
<td>3.5</td>
<td><a href="https://github.com/john-lennon-chan/synthPET_inference" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.08068" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>2</td>
<td>Zero_sugar</td>
<td>3.125</td>
<td>0.6580 (1)</td>
<td>0.5446 (1)</td>
<td>0.7373 (3)</td>
<td>0.4271 (5)</td>
<td>2.5</td>
<td>12.3233 (1)</td>
<td>11.0423 (5)</td>
<td>4.3788 (5)</td>
<td>1.6138 (7)</td>
<td>4.5</td>
<td>0.5802 (2)</td>
<td>1.3717 (3)</td>
<td>0.8209 (2)</td>
<td>6.3086 (5)</td>
<td>3.0</td>
<td><a href="https://github.com/alexanderjaus/autopet3_datadiet" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://www.arxiv.org/abs/2409.13548" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td></td>
<td>datacentric baseline</td>
<td>4.0</td>
<td>0.6362 (3)</td>
<td>0.5252 (5)</td>
<td>0.7254 (4)</td>
<td>0.4397 (4)</td>
<td>4.0</td>
<td>15.6634 (3)</td>
<td>11.0583 (6)</td>
<td>5.0262 (6)</td>
<td>1.0951 (4)</td>
<td>4.75</td>
<td>0.3659 (1)</td>
<td>1.9186 (6)</td>
<td>0.9542 (3)</td>
<td>5.7609 (3)</td>
<td>3.25</td>
<td><a href="https://github.com/ClinicalDataScience/autoPETIII/tree/main/datacentric-baseline" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td></td>
</tr>
<tr>
<td>3</td>
<td>LesionTracer</td>
<td>5.8125</td>
<td>0.6218 (6)</td>
<td>0.4466 (7)</td>
<td>0.6110 (7)</td>
<td>0.3611 (6)</td>
<td>6.5</td>
<td>21.4829 (6)</td>
<td>14.7929 (7)</td>
<td>19.6584 (7)</td>
<td>1.2459 (5)</td>
<td>6.25</td>
<td>0.7751 (3)</td>
<td>1.1609 (2)</td>
<td>1.7559 (5)</td>
<td>10.6363 (6)</td>
<td>4.0</td>
<td><a href="https://github.com/MIC-DKFZ/miccai2024_autopet3_datacentric" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://arxiv.org/abs/2409.10120" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
<tr>
<td>4</td>
<td>UIH_CRI_SIL</td>
<td>5.8125</td>
<td>0.4298 (7)</td>
<td>0.4925 (6)</td>
<td>0.6469 (6)</td>
<td>0.2903 (7)</td>
<td>6.5</td>
<td>23.3383 (7)</td>
<td>9.5916 (1)</td>
<td>4.3688 (4)</td>
<td>0.5800 (1)</td>
<td>3.25</td>
<td>8.6082 (7)</td>
<td>4.5019 (7)</td>
<td>2.7256 (7)</td>
<td>30.6340 (7)</td>
<td>7.0</td>
<td><a href="https://github.com/jiayiliu-pku/DC2024" class="icon solid alt minor fa-code"><span class="label">Model</span></a></td>
<td><a href="https://www.arxiv.org/abs/2409.09784" class="icon solid alt minor fa-paperclip"><span class="label">Preprint</span></a></td>
</tr>
</tbody>
</table>
</div>
</p>
<hr/>
</section>
<section id="Organizers_sec">
<a class="anchor" id="Organizers"></a>
<h3>Organizers</h3>
<p style="text-align:justify">
<div class="col-4 col-6-xsmall"><span class="image fit"><img src="images/lmu.png" alt="" style="width: 20%; height: auto;"/></span></div>
University Hospital of the LMU in Munich
<ul>
<li> <a href="https://www.lmu-klinikum.de/radiologie/klinik-kompakt/arztlicher-dienst/55c11d6dafa8dc71" style="" target="_blank">Clemens Cyran</a></li>
<li> <a href="https://www.osc.uni-muehttps://www.lmu-klinikum.de/radiologie/forschung/clinical-data-science/0e8a3ac188dad3f9nchen.de/members/individual-members/ingrisch1/index.html" style="" target="_blank">Michael Ingrisch</a></li>
<li>Jakob Dexl</li>
<li>Katharina Jeblick</li>
<li>Balthasar Schachtner</li>
<li>Andreas Mittermeier</li>