Skip to content

Commit 3e9baba

Browse files
committed
Tediously update all enum usages to use Enum.KEY.value instead of Enum.KEY; the latter works on Python <3.11 but broke in 3.11 (see: python/cpython#100458)
1 parent 6093dce commit 3e9baba

25 files changed: +113 −114 lines changed

kolena/_api/v1/batched_load.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,11 @@ class Path(str, Enum):
2525

2626
@classmethod
2727
def upload_signed_url(cls, load_uuid: str) -> str:
28-
return f"{cls.UPLOAD_SIGNED_URL_STUB}/{load_uuid}"
28+
return f"{cls.UPLOAD_SIGNED_URL_STUB.value}/{load_uuid}"
2929

3030
@classmethod
3131
def download_by_path(cls, path: str) -> str:
32-
return f"{cls.DOWNLOAD_BY_PATH_STUB}/{path}"
32+
return f"{cls.DOWNLOAD_BY_PATH_STUB.value}/{path}"
3333

3434
@dataclass(frozen=True)
3535
class WithLoadUUID:

kolena/_api/v1/samples.py

-19
This file was deleted.

kolena/_utils/_consts.py

-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515

1616

1717
class _BatchSize(int, Enum):
18-
UPLOAD_CHIPS = 5_000
1918
UPLOAD_RECORDS = 10_000_000
2019
UPLOAD_RESULTS = 1_000_000
2120

kolena/_utils/batched_load.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
4545

4646

4747
def init_upload() -> API.InitiateUploadResponse:
48-
init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD)
48+
init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD.value)
4949
krequests.raise_for_status(init_res)
5050
init_response = from_dict(data_class=API.InitiateUploadResponse, data=init_res.json())
5151
return init_response
@@ -81,7 +81,7 @@ def upload_data_frame_chunk(df_chunk: pd.DataFrame, load_uuid: str) -> None:
8181
def upload_image_chips(
8282
df: _ImageChipsDataFrame,
8383
path_mapper: AssetPathMapper,
84-
batch_size: int = _BatchSize.UPLOAD_CHIPS,
84+
batch_size: int = _BatchSize.UPLOAD_CHIPS.value,
8585
) -> None:
8686
def upload_batch(df_batch: _ImageChipsDataFrame) -> None:
8787
df_batch = df_batch.reset_index(drop=True) # reset indices so we match the signed_url indices
@@ -106,7 +106,7 @@ def as_buffer(image_raw: np.ndarray) -> io.BytesIO:
106106
],
107107
)
108108
upload_response = krequests.put(
109-
endpoint_path=AssetAPI.Path.BULK_UPLOAD,
109+
endpoint_path=AssetAPI.Path.BULK_UPLOAD.value,
110110
data=data,
111111
headers={"Content-Type": data.content_type},
112112
)
@@ -157,7 +157,7 @@ def complete_load(uuid: Optional[str]) -> None:
157157
return
158158
complete_request = API.CompleteDownloadRequest(uuid=uuid)
159159
complete_res = krequests.put(
160-
endpoint_path=API.Path.COMPLETE_DOWNLOAD,
160+
endpoint_path=API.Path.COMPLETE_DOWNLOAD.value,
161161
data=json.dumps(dataclasses.asdict(complete_request)),
162162
)
163163
krequests.raise_for_status(complete_res)

kolena/_utils/instrumentation.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -54,15 +54,15 @@ def upload_log(message: str, status: str) -> None:
5454
message=message,
5555
status=status,
5656
)
57-
krequests.post(endpoint_path=API.Path.UPLOAD, json=dataclasses.asdict(request))
57+
krequests.post(endpoint_path=API.Path.UPLOAD.value, json=dataclasses.asdict(request))
5858

5959

6060
def log_telemetry(e: BaseException) -> None:
6161
try:
6262
stack = tb.format_stack()
6363
exc_format = tb.format_exception(None, e, e.__traceback__)
6464
combined = stack + exc_format
65-
upload_log("".join(combined), DatadogLogLevels.ERROR)
65+
upload_log("".join(combined), DatadogLogLevels.ERROR.value)
6666
except BaseException:
6767
"""
6868
Attempting to upload the telemetry is best-effort. We don't want to have exceptions in that

kolena/_utils/repository.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
def create(repository: str) -> None:
2323
response = krequests.post(
24-
endpoint_path=Path.CREATE,
24+
endpoint_path=Path.CREATE.value,
2525
data=json.dumps(dataclasses.asdict(CreateRepositoryRequest(repository=repository))),
2626
)
2727
krequests.raise_for_status(response)

kolena/detection/_internal/model.py

+6-6
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def __init__(self, name: str, workflow: WorkflowType, metadata: Optional[Dict[st
9393
def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) -> CoreAPI.EntityData:
9494
log.info(f"creating new model '{name}'")
9595
request = CoreAPI.CreateRequest(name=name, metadata=metadata, workflow=workflow.value)
96-
res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
96+
res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
9797
krequests.raise_for_status(res)
9898
log.success(f"created new model '{name}'")
9999
return from_dict(data_class=CoreAPI.EntityData, data=res.json())
@@ -102,7 +102,7 @@ def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) ->
102102
@validate_arguments(config=ValidatorConfig)
103103
def _load_by_name(cls, name: str) -> CoreAPI.EntityData:
104104
request = CoreAPI.LoadByNameRequest(name=name)
105-
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
105+
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
106106
krequests.raise_for_status(res)
107107
return from_dict(data_class=CoreAPI.EntityData, data=res.json())
108108

@@ -131,7 +131,7 @@ def iter_inferences(
131131
def _iter_inference_batch_for_reference(
132132
self,
133133
test_object: Union[_TestCaseClass, _TestSuiteClass],
134-
batch_size: int = _BatchSize.LOAD_SAMPLES,
134+
batch_size: int = _BatchSize.LOAD_SAMPLES.value,
135135
) -> Iterator[_LoadInferencesDataFrameClass]:
136136
if batch_size <= 0:
137137
raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -143,7 +143,7 @@ def _iter_inference_batch_for_reference(
143143
init_request = API.InitLoadInferencesRequest(**params)
144144
yield from _BatchedLoader.iter_data(
145145
init_request=init_request,
146-
endpoint_path=API.Path.INIT_LOAD_INFERENCES,
146+
endpoint_path=API.Path.INIT_LOAD_INFERENCES.value,
147147
df_class=self._LoadInferencesDataFrameClass,
148148
)
149149
log.success(f"loaded inferences from model '{self.name}' on {test_object_display_name}")
@@ -166,7 +166,7 @@ def load_inferences_by_test_case(
166166
def _iter_inference_batch_for_test_suite(
167167
self,
168168
test_suite: _TestSuiteClass,
169-
batch_size: int = _BatchSize.LOAD_SAMPLES,
169+
batch_size: int = _BatchSize.LOAD_SAMPLES.value,
170170
) -> Iterator[_LoadInferencesDataFrameClass]:
171171
if batch_size <= 0:
172172
raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -175,7 +175,7 @@ def _iter_inference_batch_for_test_suite(
175175
init_request = API.InitLoadInferencesByTestCaseRequest(**params)
176176
yield from _BatchedLoader.iter_data(
177177
init_request=init_request,
178-
endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE,
178+
endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE.value,
179179
df_class=self._LoadInferencesDataFrameClass,
180180
)
181181
log.success(f"loaded inferences from model '{self.name}' on test suite '{test_suite.name}'")

kolena/detection/_internal/test_case.py

+6-6
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ def _create(
128128
"""Create a new test case with the provided name."""
129129
log.info(f"creating new test case '{name}'")
130130
request = CoreAPI.CreateRequest(name=name, description=description or "", workflow=workflow.value)
131-
res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
131+
res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
132132
krequests.raise_for_status(res)
133133
data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
134134
obj = cls._create_from_data(data)
@@ -142,7 +142,7 @@ def _create(
142142
def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.EntityData:
143143
"""Load an existing test case with the provided name."""
144144
request = CoreAPI.LoadByNameRequest(name=name, version=version)
145-
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
145+
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
146146
krequests.raise_for_status(res)
147147
return from_dict(data_class=CoreAPI.EntityData, data=res.json())
148148

@@ -173,10 +173,10 @@ def load_images(self) -> List[_TestImageClass]:
173173
def iter_images(self) -> Iterator[_TestImageClass]:
174174
"""Iterate through all images with their associated ground truths in this test case."""
175175
log.info(f"loading test images for test case '{self.name}'")
176-
init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES, test_case_id=self._id)
176+
init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES.value, test_case_id=self._id)
177177
for df in _BatchedLoader.iter_data(
178178
init_request=init_request,
179-
endpoint_path=API.Path.INIT_LOAD_IMAGES,
179+
endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
180180
df_class=self._TestImageDataFrameClass,
181181
):
182182
for record in df.itertuples():
@@ -312,7 +312,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
312312
init_response = init_upload()
313313
df = self._to_data_frame(list(editor._images.values()))
314314
df_serialized = df.as_serializable()
315-
upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
315+
upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)
316316

317317
request = CoreAPI.CompleteEditRequest(
318318
test_case_id=self._id,
@@ -322,7 +322,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
322322
uuid=init_response.uuid,
323323
)
324324
complete_res = krequests.put(
325-
endpoint_path=API.Path.COMPLETE_EDIT,
325+
endpoint_path=API.Path.COMPLETE_EDIT.value,
326326
data=json.dumps(dataclasses.asdict(request)),
327327
)
328328
krequests.raise_for_status(complete_res)

kolena/detection/_internal/test_run.py

+17-8
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,10 @@ def __init__(
9797
test_suite_ids=[test_suite._id],
9898
config=config,
9999
)
100-
res = krequests.post(endpoint_path=API.Path.CREATE_OR_RETRIEVE, data=json.dumps(dataclasses.asdict(request)))
100+
res = krequests.post(
101+
endpoint_path=API.Path.CREATE_OR_RETRIEVE.value,
102+
data=json.dumps(dataclasses.asdict(request)),
103+
)
101104
krequests.raise_for_status(res)
102105
response = from_dict(data_class=API.CreateOrRetrieveResponse, data=res.json())
103106
self._id = response.test_run_id
@@ -128,7 +131,7 @@ def __exit__(
128131
self._submit_custom_metrics()
129132
self._active = False
130133
if exc_type is not None:
131-
report_crash(self._id, API.Path.MARK_CRASHED)
134+
report_crash(self._id, API.Path.MARK_CRASHED.value)
132135

133136
@validate_arguments(config=ValidatorConfig)
134137
def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_InferenceClass]]) -> None:
@@ -160,7 +163,7 @@ def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_Infe
160163

161164
self._inferences[image_id] = context_image_inferences
162165

163-
if self._n_inferences >= _BatchSize.UPLOAD_RESULTS:
166+
if self._n_inferences >= _BatchSize.UPLOAD_RESULTS.value:
164167
log.info(f"uploading batch of '{self._n_inferences}' inference results")
165168
self._upload_chunk()
166169
log.success(f"uploaded batch of '{self._n_inferences}' inference results")
@@ -176,7 +179,7 @@ def iter_images(self) -> Iterator[_TestImageClass]:
176179
yield self._image_from_load_image_record(record)
177180

178181
@validate_arguments(config=ValidatorConfig)
179-
def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestImageClass]:
182+
def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES.value) -> List[_TestImageClass]:
180183
"""
181184
Returns a list of images that still need inferences evaluated, bounded in count
182185
by batch_size. Note that image ground truths will be excluded from the returned
@@ -195,7 +198,10 @@ def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestIm
195198
return [self._image_from_load_image_record(record) for record in df_image_batch.itertuples()]
196199

197200
@validate_arguments(config=ValidatorConfig)
198-
def _iter_image_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterator[_LoadTestImagesDataFrameClass]:
201+
def _iter_image_batch(
202+
self,
203+
batch_size: int = _BatchSize.LOAD_SAMPLES.value,
204+
) -> Iterator[_LoadTestImagesDataFrameClass]:
199205
if batch_size <= 0:
200206
raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
201207
init_request = API.InitLoadRemainingImagesRequest(
@@ -205,7 +211,7 @@ def _iter_image_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterat
205211
)
206212
yield from _BatchedLoader.iter_data(
207213
init_request=init_request,
208-
endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES,
214+
endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES.value,
209215
df_class=self._LoadTestImagesDataFrameClass,
210216
)
211217

@@ -239,7 +245,7 @@ def _finalize_upload(self) -> None:
239245
log.info("finalizing inference upload for test run")
240246
request = API.UploadImageResultsRequest(uuid=self._upload_uuid, test_run_id=self._id, reset=self._reset)
241247
finalize_res = krequests.put(
242-
endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS,
248+
endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS.value,
243249
data=json.dumps(dataclasses.asdict(request)),
244250
)
245251
krequests.raise_for_status(finalize_res)
@@ -289,6 +295,9 @@ def _submit_custom_metrics(self) -> None:
289295
log.info("submitting custom metrics for test run")
290296
custom_metrics = self._compute_custom_metrics()
291297
request = API.UpdateCustomMetricsRequest(model_id=self._model._id, metrics=custom_metrics)
292-
res = krequests.put(endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS, data=json.dumps(dataclasses.asdict(request)))
298+
res = krequests.put(
299+
endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS.value,
300+
data=json.dumps(dataclasses.asdict(request)),
301+
)
293302
krequests.raise_for_status(res)
294303
log.success("submitted custom metrics for test run")

kolena/detection/_internal/test_suite.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ def _create(
112112
"""Create a new test suite with the provided name."""
113113
log.info(f"creating new test suite '{name}'")
114114
request = CoreAPI.TestSuite.CreateRequest(name=name, description=description or "", workflow=workflow.value)
115-
res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
115+
res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
116116
krequests.raise_for_status(res)
117117
data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
118118
obj = cls._create_from_data(data)
@@ -127,7 +127,7 @@ def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.Test
127127
"""Retrieve the existing test suite with the provided name."""
128128
request = CoreAPI.TestSuite.LoadByNameRequest(name=name, version=version)
129129
data = json.dumps(dataclasses.asdict(request))
130-
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=data)
130+
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=data)
131131
krequests.raise_for_status(res)
132132
return from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
133133

@@ -298,7 +298,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
298298
test_case_ids=list(editor._test_cases.values()),
299299
)
300300
data = json.dumps(dataclasses.asdict(request))
301-
res = krequests.post(endpoint_path=API.Path.EDIT, data=data)
301+
res = krequests.post(endpoint_path=API.Path.EDIT.value, data=data)
302302
krequests.raise_for_status(res)
303303
log.success(f"updated test suite '{self.name}'")
304304
test_suite_data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())

kolena/detection/test_image.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -103,10 +103,10 @@ def iter_images(dataset: Optional[str] = None) -> Iterator[TestImage]:
103103
:param dataset: optionally specify the single dataset to be retrieved. By default, images from all
104104
datasets are returned
105105
"""
106-
init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS)
106+
init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS.value)
107107
for df in _BatchedLoader.iter_data(
108108
init_request=init_request,
109-
endpoint_path=API.Path.INIT_LOAD_IMAGES,
109+
endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
110110
df_class=TestImageDataFrame,
111111
):
112112
for record in df.itertuples():

kolena/fr/model.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def create(cls, name: str, metadata: Dict[str, Any]) -> "Model":
5858
"""
5959
log.info(f"creating model '{name}'")
6060
request = API.CreateRequest(name=name, metadata=metadata)
61-
res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
61+
res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
6262
krequests.raise_for_status(res)
6363
log.success(f"created model '{name}'")
6464
return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -74,7 +74,7 @@ def load_by_name(cls, name: str) -> "Model":
7474
"""
7575
log.info(f"loading model '{name}'")
7676
request = API.LoadByNameRequest(name=name)
77-
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
77+
res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
7878
krequests.raise_for_status(res)
7979
log.success(f"loaded model '{name}'")
8080
return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -146,7 +146,7 @@ def iter_pair_results(
146146
init_request = API.InitLoadPairResultsRequest(batch_size=batch_size, **base_load_request)
147147
yield from _BatchedLoader.iter_data(
148148
init_request=init_request,
149-
endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS,
149+
endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS.value,
150150
df_class=LoadedPairResultDataFrame,
151151
)
152152
log.success(f"loaded pair results from model '{self.data.name}' on {test_object_display_name}")

0 commit comments

Comments (0)