@@ -1,4 +1,3 @@
-import random
 
 
 def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **kwargs):
@@ -11,7 +10,11 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
 
         `test_annotation_file`: Path to test_annotation_file on the server
         We recommend setting a default `test_annotation_file` or using `phase_codename`
-        to select the appropriate file.
+        to select the appropriate file. For example, you could load the test annotation
+        file for the current phase as:
+        ```
+        test_annotation_file = json.load(open("{phase_codename}_path", "r"))
+        ```
         `**kwargs`: keyword arguments that contain additional submission
         metadata that challenge hosts can use to send slack notifications.
         You can access the submission metadata
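The docstring sketch above leaves the annotation path as a `"{phase_codename}_path"` placeholder. A minimal sketch of one way to resolve it, assuming a hypothetical `ANNOTATION_PATHS` mapping and `annotations/*.json` paths that are not part of this commit:

```python
import json

# Hypothetical mapping from phase codename to annotation file path;
# these paths are placeholders, not part of this commit.
ANNOTATION_PATHS = {
    "dev": "annotations/dev_annotations.json",
    "test": "annotations/test_annotations.json",
}

def load_annotations(phase_codename):
    # Fall back to the dev annotations for unknown phases.
    path = ANNOTATION_PATHS.get(phase_codename, ANNOTATION_PATHS["dev"])
    with open(path, "r") as f:
        return json.load(f)  # json.load reads from a file object
```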
@@ -39,43 +42,35 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
             'submitted_at': u'2017-03-20T19:22:03.880652Z'
         }
     """
+
+    '''
+    # Load test annotation file for current phase
+    test_annotation_file = json.load(open("{phase_codename}_path", "r"))
+    '''
     output = {}
     if phase_codename == "dev":
         print("Evaluating for Dev Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
-            }
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
+            },
         ]
-        # To display the results in the result file
-        output["submission_result"] = output["result"][0]["train_split"]
         print("Completed evaluation for Dev Phase")
     elif phase_codename == "test":
         print("Evaluating for Test Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
             },
             {
-                "test_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
+                "split": "test_split",
+                "show_to_participant": False,
+                "accuracies": {"Metric1": 50, "Metric2": 40},
             },
         ]
-        # To display the results in the result file
-        output["submission_result"] = output["result"][0]
         print("Completed evaluation for Test Phase")
     return output
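After this change, each entry in `output["result"]` names its split explicitly and carries a `show_to_participant` flag plus an `accuracies` dict of fixed scores, instead of random placeholder metrics keyed by split name. A minimal sketch of how the returned structure might be consumed (the submission path and metadata below are hypothetical, not part of this commit):

```python
# Hypothetical invocation; the submission path and metadata are placeholders.
result = evaluate(
    user_submission_file="submissions/user_42.json",
    phase_codename="test",
    submission_metadata={"submitted_at": "2017-03-20T19:22:03.880652Z"},
)

for entry in result["result"]:
    # Surface only the splits that hosts marked as visible to participants.
    if entry["show_to_participant"]:
        print(entry["split"], entry["accuracies"])
```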