From 3b145a2361a694d1dd0cadaa4bd2b9fca5b4bff2 Mon Sep 17 00:00:00 2001
From: Hannah DeFazio
Date: Thu, 16 May 2024 16:28:16 -0400
Subject: [PATCH] Black formatting

---
 .../tcn_hpl/predict.py                        | 19 +++++------
 .../train_activity_classifier.py              |  2 +-
 .../create_custom_learn_video_dataset.py      |  6 ++--
 angel_system/data/common/kwcoco_utils.py      |  6 ++--
 .../global_step_predictor.py                  | 24 ++++++-------
 .../activity_classifier_tcn.py                | 14 ++++----
 .../angel_utils/activity_classification.py    | 34 ++++++++++++-------
 ros/angel_utils/scripts/bag_extractor.py      | 18 +++++-----
 8 files changed, 65 insertions(+), 58 deletions(-)

diff --git a/angel_system/activity_classification/tcn_hpl/predict.py b/angel_system/activity_classification/tcn_hpl/predict.py
index 7d44665ef..4715b3f8f 100644
--- a/angel_system/activity_classification/tcn_hpl/predict.py
+++ b/angel_system/activity_classification/tcn_hpl/predict.py
@@ -119,9 +119,9 @@ def normalize_detection_features(
     :param image_height: Integer pixel height of the image that object detections
         were generated on.
     :param num_det_classes: Number of object detection classes (note: DOES include
         the hand labels but DOES NOT include the patient and user labels)
-    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to 
+    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to
         the ``det_feats``
-    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to 
+    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to
         the ``det_feats``
     :return: Normalized object detection features.
@@ -178,13 +178,13 @@ def objects_to_feats(
     :param pose_memo: Optional memoization cache of the pose used for each frame
         and the repeat pose count at each frame
     :param top_k_objects: Number top confidence objects to use per label, defaults to 1
-    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to 
+    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to
         the feature vector
-    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to 
+    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to
         the feature vector
     :param pose_repeat_rate: The maximum number of sequential None value poses that can be replaced with
-        a valid pose in a previous frame. If this number is exceeded, the pose 
-        for the frame will remain None. 
+        a valid pose in a previous frame. If this number is exceeded, the pose
+        for the frame will remain None.

     :raises ValueError: No object detections nor patient poses passed in.
     :raises ValueError: No non-None object detections in the given input
@@ -227,7 +227,7 @@ def objects_to_feats(
     for i, (pose, detections) in enumerate(
         zip(frame_patient_poses, frame_object_detections)
     ):
-        
+
         if detections is None:
             print("no detections!")
             continue
@@ -268,7 +268,7 @@ def objects_to_feats(
             elif last_pose is not None:
                 repeated_pose_count += 1
                 # Repeat at most {pose_repeat_rate} poses in a row
-                if repeated_pose_count > (pose_repeat_rate/2):
+                if repeated_pose_count > (pose_repeat_rate / 2):
                     last_pose = None
                     print("Resetting pose to None")
                     repeated_pose_count = 0
@@ -283,7 +283,7 @@ def objects_to_feats(

             pose_memo[memo_key] = {
                 "last_pose": last_pose,
-                "repeated_pose_count": repeated_pose_count
+                "repeated_pose_count": repeated_pose_count,
             }

             # Grab the joint keypoints
@@ -316,7 +316,6 @@ def objects_to_feats(
                 .astype(np.float32)
             )

-
             feat_memo[memo_key] = feat

     feature_ndim = feat.shape
diff --git a/angel_system/activity_classification/train_activity_classifier.py b/angel_system/activity_classification/train_activity_classifier.py
index e2c6c4fcb..d6b7579b4 100644
--- a/angel_system/activity_classification/train_activity_classifier.py
+++ b/angel_system/activity_classification/train_activity_classifier.py
@@ -113,7 +113,7 @@ def compute_feats(
     ann_by_image: dict,
     feat_version=1,
     top_k_objects=1,
-    pose_repeat_rate=4
+    pose_repeat_rate=4,
 ) -> Tuple[np.ndarray, np.ndarray]:
     """Compute features from object detections
diff --git a/angel_system/data/common/create_custom_learn_video_dataset.py b/angel_system/data/common/create_custom_learn_video_dataset.py
index 9a8b9db1d..90e21c6c9 100644
--- a/angel_system/data/common/create_custom_learn_video_dataset.py
+++ b/angel_system/data/common/create_custom_learn_video_dataset.py
@@ -65,9 +65,9 @@ def main(args):
     ):
         temp_df = df[df["# 1: Detection or Track-id"] == str(label)]
         if temp_df.iloc[0]["10-11+: Repeated Species"] not in label_dict.keys():
-            label_dict[temp_df.iloc[0]["10-11+: Repeated Species"]] = (
-                label_counter
-            )
+            label_dict[
+                temp_df.iloc[0]["10-11+: Repeated Species"]
+            ] = label_counter
             label_counter += 1
         min_frame = pd.to_numeric(temp_df["3: Unique Frame Identifier"]).min()
         max_frame = pd.to_numeric(temp_df["3: Unique Frame Identifier"]).max()
diff --git a/angel_system/data/common/kwcoco_utils.py b/angel_system/data/common/kwcoco_utils.py
index cf997c1d7..73108da10 100644
--- a/angel_system/data/common/kwcoco_utils.py
+++ b/angel_system/data/common/kwcoco_utils.py
@@ -92,7 +92,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
     # Update the activity gt for each image
     for gid in sorted(image_ids):
         im = dset.imgs[gid]
-        
+
         frame_idx, time = time_from_name(im["file_name"], topic)

         if time:
@@ -106,7 +106,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
                 label = "background"
                 activity_label = label
             else:
-                label = matching_gt.iloc[0]["class_label"] # default to the first gt
+                label = matching_gt.iloc[0]["class_label"]  # default to the first gt

                 # Hacky temporary fix
                 # In the medical data, step 1 can cover the same frames as other steps,
@@ -134,7 +134,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
                     f"Label: {label} is not in the activity labels config, ignoring"
                 )
                 print(f"LABEL: {label}, {type(label)}")
-                
+
                 label = "background"
                 activity_label = label
             else:
diff --git a/angel_system/global_step_prediction/global_step_predictor.py b/angel_system/global_step_prediction/global_step_predictor.py
index 24063bf95..5841879bb 100644
--- a/angel_system/global_step_prediction/global_step_predictor.py
+++ b/angel_system/global_step_prediction/global_step_predictor.py
@@ -235,9 +235,9 @@ def initialize_new_recipe_tracker(self, recipe, config_fn=None):
             },
         )

-        tracker_dict["last_granular_step_per_broad_step"] = (
-            self.get_last_granular_step_per_broad_step(broad_steps)
-        )
+        tracker_dict[
+            "last_granular_step_per_broad_step"
+        ] = self.get_last_granular_step_per_broad_step(broad_steps)

         tracker_dict["recipe"] = recipe
         tracker_dict["current_broad_step"] = 0
@@ -254,9 +254,9 @@ def initialize_new_recipe_tracker(self, recipe, config_fn=None):
         tracker_dict["broad_step_to_activity_ids"] = [
             self.get_unique(step["activity_ids"]) for step in broad_steps
         ]
-        tracker_dict["granular_step_to_activity_id"] = (
-            self.get_activity_per_granular_step(broad_steps)
-        )
+        tracker_dict[
+            "granular_step_to_activity_id"
+        ] = self.get_activity_per_granular_step(broad_steps)

         # Labels
         tracker_dict["broad_step_to_label"] = [step["label"] for step in broad_steps]
@@ -300,9 +300,9 @@ def increment_granular_step(self, tracker_ind):

         if current_granular_step < num_granular_steps:
             self.trackers[tracker_ind]["current_granular_step"] += 1
-            self.trackers[tracker_ind]["current_broad_step"] = (
-                self.granular_to_broad_step(tracker, current_granular_step)
-            )
+            self.trackers[tracker_ind][
+                "current_broad_step"
+            ] = self.granular_to_broad_step(tracker, current_granular_step)
         elif current_granular_step == num_granular_steps and tracker["active"] == True:
             self.trackers[tracker_ind]["active"] = False
         else:
@@ -335,9 +335,9 @@ def decrement_granular_step(self, tracker_ind):

         if current_granular_step > 0:
             self.trackers[tracker_ind]["current_granular_step"] -= 1
-            self.trackers[tracker_ind]["current_broad_step"] = (
-                self.granular_to_broad_step(tracker, current_granular_step)
-            )
+            self.trackers[tracker_ind][
+                "current_broad_step"
+            ] = self.granular_to_broad_step(tracker, current_granular_step)
         else:
             raise Exception(
                 f"Tried to decrement tracker #{tracker_ind}: "
diff --git a/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py b/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py
index d622470f7..3eaa84974 100644
--- a/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py
+++ b/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py
@@ -420,9 +420,9 @@ def _thread_populate_from_coco(self, input_coco_path: Path) -> None:
             # Creates [n_det, n_label] matrix, which we assign to and then
             # ravel into the message slot.
             conf_mat = np.zeros((n_dets, len(obj_labels)), dtype=np.float64)
-            conf_mat[np.arange(n_dets), image_annots.get("category_id")] = (
-                image_annots.get("confidence")
-            )
+            conf_mat[
+                np.arange(n_dets), image_annots.get("category_id")
+            ] = image_annots.get("confidence")
             det_msg.label_confidences.extend(conf_mat.ravel())

             # Calling the image callback last since image frames define the
@@ -602,7 +602,7 @@ def rt_loop(self):

                 window = self._buffer.get_window(
                     self._window_size,
-                    have_leading_object=self._window_lead_with_objects
+                    have_leading_object=self._window_lead_with_objects,
                 )

                 # log.info(f"buffer contents: {window.obj_dets}")
@@ -762,11 +762,11 @@ def _process_window(self, window: InputWindow) -> ActivityDetection:
                 feat_version=self._feat_version,
                 image_width=self._img_pix_width,
                 image_height=self._img_pix_height,
-                #feature_memo=memo_object_to_feats, # passed by reference so this gets updated in the function and changes persist here
-                #pose_memo=queued_pose_memo,
+                # feature_memo=memo_object_to_feats, # passed by reference so this gets updated in the function and changes persist here
+                # pose_memo=queued_pose_memo,
                 normalize_pixel_pts=self.model_normalize_pixel_pts,
                 normalize_center_pts=self.model_normalize_center_pts,
-                pose_repeat_rate=self._pose_repeat_rate
+                pose_repeat_rate=self._pose_repeat_rate,
             )
         except ValueError as ex:
             log.warn(f"object-to-feats: ValueError: {ex}")
diff --git a/ros/angel_utils/python/angel_utils/activity_classification.py b/ros/angel_utils/python/angel_utils/activity_classification.py
index 949f43ab7..c037cdae6 100644
--- a/ros/angel_utils/python/angel_utils/activity_classification.py
+++ b/ros/angel_utils/python/angel_utils/activity_classification.py
@@ -63,14 +63,20 @@ def __repr__(self):
         timestamp frame and correlated data **lower** in the table (higher index).
         This order is arbitrary.
         """
-        return repr(pd.DataFrame(
-            data={
-                "frames": [time_to_int(f[0]) for f in self.frames],
-                "detections": [(d.num_detections if d else None) for d in self.obj_dets],
-                "poses": [(len(p.joints) if p else None) for p in self.patient_joint_kps],
-            },
-            dtype=pd.Int64Dtype,
-        ))
+        return repr(
+            pd.DataFrame(
+                data={
+                    "frames": [time_to_int(f[0]) for f in self.frames],
+                    "detections": [
+                        (d.num_detections if d else None) for d in self.obj_dets
+                    ],
+                    "poses": [
+                        (len(p.joints) if p else None) for p in self.patient_joint_kps
+                    ],
+                },
+                dtype=pd.Int64Dtype,
+            )
+        )


 # TODO: A more generic version of InputBuffer
@@ -337,11 +343,13 @@ def get_window(
             # starting.
             # Finally, the extracted slice is reversed in order again so that
             # the final list is in temporally ascending order.
-            window_frames = list(itertools.islice(
-                reversed(self.frames),
-                window_frame_start_idx,
-                window_size + window_frame_start_idx
-            ))[::-1]
+            window_frames = list(
+                itertools.islice(
+                    reversed(self.frames),
+                    window_frame_start_idx,
+                    window_size + window_frame_start_idx,
+                )
+            )[::-1]
             window_frame_times: List[Time] = [wf[0] for wf in window_frames]
             window_frame_times_ns: List[int] = [
                 time_to_int(wft) for wft in window_frame_times
diff --git a/ros/angel_utils/scripts/bag_extractor.py b/ros/angel_utils/scripts/bag_extractor.py
index 25947496e..8e10ac97a 100644
--- a/ros/angel_utils/scripts/bag_extractor.py
+++ b/ros/angel_utils/scripts/bag_extractor.py
@@ -153,19 +153,19 @@ def __init__(self):
         if self.extract_head_pose_data or self.extract_depth_head_pose_data:
             self.msg_type_to_handler_map[HeadsetPoseData] = self.handle_head_pose_msg
         if self.extract_hand_pose_data:
-            self.msg_type_to_handler_map[HandJointPosesUpdate] = (
-                self.handle_hand_pose_msg
-            )
+            self.msg_type_to_handler_map[
+                HandJointPosesUpdate
+            ] = self.handle_hand_pose_msg
         if self.extract_spatial_map_data:
             self.msg_type_to_handler_map[SpatialMesh] = self.handle_spatial_mesh_msg
         if self.extract_annotation_event_data:
-            self.msg_type_to_handler_map[AnnotationEvent] = (
-                self.handle_annotation_event_msg
-            )
+            self.msg_type_to_handler_map[
+                AnnotationEvent
+            ] = self.handle_annotation_event_msg
         if self.extract_activity_detection_data:
-            self.msg_type_to_handler_map[ActivityDetection] = (
-                self.handle_activity_detection_msg
-            )
+            self.msg_type_to_handler_map[
+                ActivityDetection
+            ] = self.handle_activity_detection_msg
         if self.extract_task_update_data:
             self.msg_type_to_handler_map[TaskUpdate] = self.handle_task_update_msg
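
Every hunk in this patch is mechanical reformatting (trailing whitespace stripped, trailing commas added to multi-line calls, and long assignments wrapped at the subscript rather than the right-hand side); no behavior changes. To sanity-check how Black rewrites one of these patterns without touching files, the snippet below is a minimal sketch using Black's Python API. It assumes the black package is installed; the exact wrapping produced depends on the Black version pinned for this repository, and running "python -m black ." at the repo root applies the same style on disk.

    import black

    # One of the pre-patch lines from global_step_predictor.py, as a source
    # string (split here only to keep this example's own lines short).
    SRC = (
        'tracker_dict["last_granular_step_per_broad_step"] = ('
        "self.get_last_granular_step_per_broad_step(broad_steps))\n"
    )

    # black.format_str applies Black's style to a source string in memory;
    # black.Mode() uses the defaults (88-column line length).
    print(black.format_str(SRC, mode=black.Mode()))

Comparing that output against the corresponding "+" lines above is a quick way to confirm a local Black install matches the version that produced this commit.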