@@ -40,7 +40,7 @@ def test_compute_confusion_matrix():
 
     score_thresholds = np.array([0.25, 0.75], dtype=np.float64)
 
-    confusion_matrix, missing_predictions = compute_confusion_matrix(
+    confusion_matrix, unmatched_ground_truths = compute_confusion_matrix(
         data=data,
         label_metadata=label_metadata,
         score_thresholds=score_thresholds,
@@ -74,15 +74,15 @@ def test_compute_confusion_matrix():
         )
     ).all()
 
-    assert missing_predictions.shape == (2, 4, 1)
+    assert unmatched_ground_truths.shape == (2, 4, 1)
     assert (
         # score >= 0.25
-        missing_predictions[0, :, 0]
+        unmatched_ground_truths[0, :, 0]
         == np.array([-1.0, -1.0, -1.0, -1.0])
     ).all()
     assert (
         # score >= 0.75
-        missing_predictions[1, :, 0]
+        unmatched_ground_truths[1, :, 0]
         == np.array([-1.0, -1.0, -1.0, 1.0])
     ).all()
 
@@ -144,7 +144,7 @@ def test_confusion_matrix_basic(basic_classifications: list[Classification]):
                         }
                     },
                 },
-                "missing_predictions": {},
+                "unmatched_ground_truths": {},
             },
             "parameters": {
                 "score_threshold": 0.25,
@@ -166,7 +166,7 @@ def test_confusion_matrix_basic(basic_classifications: list[Classification]):
                         },
                     }
                 },
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "3": {"count": 1, "examples": [{"datum": "uid2"}]}
                 },
             },
@@ -179,7 +179,7 @@ def test_confusion_matrix_basic(basic_classifications: list[Classification]):
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
         )
         assert m in expected_metrics
     for m in expected_metrics:
@@ -212,7 +212,7 @@ def test_confusion_matrix_unit(
                     "1": {"1": {"count": 1, "examples": []}},
                     "2": {"1": {"count": 2, "examples": []}},
                 },
-                "missing_predictions": {},
+                "unmatched_ground_truths": {},
             },
             "parameters": {
                 "score_threshold": 0.5,
@@ -223,7 +223,7 @@ def test_confusion_matrix_unit(
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
        )
        assert m in expected_metrics
    for m in expected_metrics:
@@ -282,7 +282,7 @@ def test_confusion_matrix_with_animal_example(
                         }
                     },
                 },
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "dog": {"count": 1, "examples": [{"datum": "uid5"}]}
                },
            },
@@ -295,7 +295,7 @@ def test_confusion_matrix_with_animal_example(
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
        )
        assert m in expected_metrics
    for m in expected_metrics:
@@ -356,7 +356,7 @@ def test_confusion_matrix_with_color_example(
                         }
                     },
                 },
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "red": {"count": 1, "examples": [{"datum": "uid2"}]}
                },
            },
@@ -369,7 +369,7 @@ def test_confusion_matrix_with_color_example(
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
        )
        assert m in expected_metrics
    for m in expected_metrics:
@@ -438,7 +438,7 @@ def test_confusion_matrix_multiclass(
                         }
                     },
                 },
-                "missing_predictions": {},
+                "unmatched_ground_truths": {},
            },
            "parameters": {
                "score_threshold": 0.05,
@@ -466,7 +466,7 @@ def test_confusion_matrix_multiclass(
                         }
                     },
                 },
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "cat": {
                         "count": 2,
                         "examples": [{"datum": "uid0"}, {"datum": "uid2"}],
@@ -483,7 +483,7 @@ def test_confusion_matrix_multiclass(
             "type": "ConfusionMatrix",
             "value": {
                 "confusion_matrix": {},
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "cat": {
                         "count": 2,
                         "examples": [{"datum": "uid0"}, {"datum": "uid2"}],
@@ -504,7 +504,7 @@ def test_confusion_matrix_multiclass(
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
        )
        assert m in expected_metrics
    for m in expected_metrics:
@@ -560,7 +560,7 @@ def test_confusion_matrix_without_hardmax_animal_example(
                         },
                     }
                 },
-                "missing_predictions": {},
+                "unmatched_ground_truths": {},
            },
            "parameters": {
                "score_threshold": 0.05,
@@ -580,7 +580,7 @@ def test_confusion_matrix_without_hardmax_animal_example(
                         }
                     }
                 },
-                "missing_predictions": {},
+                "unmatched_ground_truths": {},
            },
            "parameters": {
                "score_threshold": 0.4,
@@ -591,7 +591,7 @@ def test_confusion_matrix_without_hardmax_animal_example(
             "type": "ConfusionMatrix",
             "value": {
                 "confusion_matrix": {},
-                "missing_predictions": {
+                "unmatched_ground_truths": {
                     "ant": {
                         "count": 1,
                         "examples": [
@@ -611,7 +611,7 @@ def test_confusion_matrix_without_hardmax_animal_example(
     for m in actual_metrics:
         _filter_elements_with_zero_count(
             cm=m["value"]["confusion_matrix"],
-            mp=m["value"]["missing_predictions"],
+            mp=m["value"]["unmatched_ground_truths"],
        )
        assert m in expected_metrics
    for m in expected_metrics:
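Taken together, these hunks rename the second return value of compute_confusion_matrix and the corresponding metric key from missing_predictions to unmatched_ground_truths. As a rough sketch of the payload shape the tests assert against (the label names, counts, and datum uid below are illustrative placeholders, not values produced by the library):

    # Hypothetical ConfusionMatrix payload after the rename; structure mirrors the
    # expected_metrics dicts above, values are placeholders.
    metric = {
        "type": "ConfusionMatrix",
        "value": {
            "confusion_matrix": {
                "dog": {"dog": {"count": 1, "examples": []}},
            },
            # previously keyed as "missing_predictions": ground truths with no matching prediction
            "unmatched_ground_truths": {
                "cat": {"count": 1, "examples": [{"datum": "uid0"}]},
            },
        },
        "parameters": {"score_threshold": 0.25},
    }

    # Callers that previously read m["value"]["missing_predictions"] would switch to:
    unmatched = metric["value"]["unmatched_ground_truths"]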