Commit e8fa2a7

Merge pull request #415 from hdefazio/fix/black

Black formatting

hdefazio authored May 16, 2024
2 parents 7ff983f + 3b145a2 commit e8fa2a7
Showing 8 changed files with 65 additions and 58 deletions.
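Every hunk in this commit is mechanical reformatting from the Black code formatter; none of the changes below alters behavior. As a rough sketch (assuming only that the `black` package is installed; the Black version this repository pins is not shown on this page, and output can vary across releases), the same normalization can be reproduced through Black's Python API:

```python
# Minimal sketch: run Black's formatter over a source string.
# Assumes `black` is installed; exact output can differ between Black
# releases, and the version pinned by this repository is not shown here.
import black

src = "if repeated_pose_count > (pose_repeat_rate/2):\n    last_pose = None\n"
print(black.format_str(src, mode=black.Mode()))
# -> if repeated_pose_count > (pose_repeat_rate / 2):
#        last_pose = None
```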
19 changes: 9 additions & 10 deletions angel_system/activity_classification/tcn_hpl/predict.py
@@ -119,9 +119,9 @@ def normalize_detection_features(
     :param image_height: Integer pixel height of the image that object
         detections were generated on.
     :param num_det_classes: Number of object detection classes (note: DOES include the hand labels but DOES NOT include the patient and user labels)
-    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to 
+    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to
         the ``det_feats``
-    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to 
+    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to
         the ``det_feats``
     :return: Normalized object detection features.
@@ -178,13 +178,13 @@ def objects_to_feats(
     :param pose_memo: Optional memoization cache of the pose used for each
         frame and the repeat pose count at each frame
     :param top_k_objects: Number top confidence objects to use per label, defaults to 1
-    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to 
+    :param normalize_pixel_pts: If true, will apply the NormalizePixelPts data augmentation to
         the feature vector
-    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to 
+    :param normalize_center_pts: If true, will apply the NormalizeFromCenter data augmentation to
         the feature vector
     :param pose_repeat_rate: The maximum number of sequential None value poses that can be replaced with
-       a valid pose in a previous frame. If this number is exceeded, the pose
-       for the frame will remain None.
+        a valid pose in a previous frame. If this number is exceeded, the pose
+        for the frame will remain None.

     :raises ValueError: No object detections nor patient poses passed in.
     :raises ValueError: No non-None object detections in the given input
@@ -227,7 +227,7 @@ def objects_to_feats(
     for i, (pose, detections) in enumerate(
         zip(frame_patient_poses, frame_object_detections)
     ):
-        
+
         if detections is None:
             print("no detections!")
             continue
@@ -268,7 +268,7 @@ def objects_to_feats(
         elif last_pose is not None:
             repeated_pose_count += 1
             # Repeat at most {pose_repeat_rate} poses in a row
-            if repeated_pose_count > (pose_repeat_rate/2):
+            if repeated_pose_count > (pose_repeat_rate / 2):
                 last_pose = None
                 print("Resetting pose to None")
                 repeated_pose_count = 0
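Beyond the operator-spacing fix, this hunk shows the pose carry-forward policy: a frame with no detected pose reuses the last valid pose, but only for up to `pose_repeat_rate / 2` consecutive frames before the tracker resets to None. A standalone sketch of that policy (the `carry_forward` helper and its list-of-strings input are hypothetical simplifications; the real code operates on pose messages inside `objects_to_feats`):

```python
# Sketch of the carry-forward policy in the hunk above: reuse the last
# valid pose for missing frames, resetting once more than
# pose_repeat_rate / 2 consecutive frames have reused it.
# `carry_forward`, `poses`, and the string poses are hypothetical
# stand-ins for the function state not visible in this diff.
from typing import List, Optional

def carry_forward(poses: List[Optional[str]], pose_repeat_rate: int = 4) -> List[Optional[str]]:
    last_pose: Optional[str] = None
    repeated_pose_count = 0
    filled = []
    for pose in poses:
        if pose is not None:
            last_pose = pose
            repeated_pose_count = 0
        elif last_pose is not None:
            repeated_pose_count += 1
            # Repeat at most {pose_repeat_rate} poses in a row
            if repeated_pose_count > (pose_repeat_rate / 2):
                last_pose = None
                repeated_pose_count = 0
        filled.append(last_pose)
    return filled

print(carry_forward(["p1", None, None, None, None], pose_repeat_rate=4))
# -> ['p1', 'p1', 'p1', None, None]
```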
@@ -283,7 +283,7 @@ def objects_to_feats(

         pose_memo[memo_key] = {
             "last_pose": last_pose,
-            "repeated_pose_count": repeated_pose_count
+            "repeated_pose_count": repeated_pose_count,
         }

         # Grab the joint keypoints
@@ -316,7 +316,6 @@ def objects_to_feats(
             .astype(np.float32)
         )

-
         feat_memo[memo_key] = feat

         feature_ndim = feat.shape
@@ -113,7 +113,7 @@ def compute_feats(
     ann_by_image: dict,
     feat_version=1,
     top_k_objects=1,
-    pose_repeat_rate=4
+    pose_repeat_rate=4,
 ) -> Tuple[np.ndarray, np.ndarray]:
     """Compute features from object detections
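The only change in this hunk is the trailing comma after the final parameter. Under Black's "magic trailing comma" rule, a trailing comma is also an instruction to keep the call or signature exploded one item per line, even when it would fit on a single line. A small illustration (again assuming the `black` package is available; the `fn(...)` call is a hypothetical example, not code from this repository):

```python
# Black's "magic trailing comma": without it, a short call collapses to
# one line; with it, the call stays exploded one argument per line.
import black

print(black.format_str("fn(a=1, b=2, c=3)\n", mode=black.Mode()))
# -> fn(a=1, b=2, c=3)
print(black.format_str("fn(a=1, b=2, c=3,)\n", mode=black.Mode()))
# -> fn(
#        a=1,
#        b=2,
#        c=3,
#    )
```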
6 changes: 3 additions & 3 deletions angel_system/data/common/create_custom_learn_video_dataset.py
@@ -65,9 +65,9 @@ def main(args):
     ):
         temp_df = df[df["# 1: Detection or Track-id"] == str(label)]
         if temp_df.iloc[0]["10-11+: Repeated Species"] not in label_dict.keys():
-            label_dict[temp_df.iloc[0]["10-11+: Repeated Species"]] = (
-                label_counter
-            )
+            label_dict[
+                temp_df.iloc[0]["10-11+: Repeated Species"]
+            ] = label_counter
             label_counter += 1
         min_frame = pd.to_numeric(temp_df["3: Unique Frame Identifier"]).min()
         max_frame = pd.to_numeric(temp_df["3: Unique Frame Identifier"]).max()
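The two layouts in this hunk (and in several later files) parse to exactly the same program; they differ only in whether the long line is split inside the subscript on the left or inside parentheses on the right. Black releases differ here: Black 24's stable style prefers the parenthesized right-hand side, while older releases split the subscript, which suggests the two sides were produced by different Black versions and this PR normalizes back to the repository's pinned style. A quick check that the change is behavior-preserving, using the two layouts from this hunk verbatim:

```python
# Both layouts from the hunk above produce identical ASTs, i.e. the
# reformat is purely cosmetic.
import ast

old = (
    'label_dict[temp_df.iloc[0]["10-11+: Repeated Species"]] = (\n'
    "    label_counter\n"
    ")\n"
)
new = (
    "label_dict[\n"
    '    temp_df.iloc[0]["10-11+: Repeated Species"]\n'
    "] = label_counter\n"
)
assert ast.dump(ast.parse(old)) == ast.dump(ast.parse(new))
print("ASTs match; formatting-only change")
```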
6 changes: 3 additions & 3 deletions angel_system/data/common/kwcoco_utils.py
@@ -92,7 +92,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
     # Update the activity gt for each image
     for gid in sorted(image_ids):
         im = dset.imgs[gid]
-        
+
         frame_idx, time = time_from_name(im["file_name"], topic)

         if time:
@@ -106,7 +106,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
             label = "background"
             activity_label = label
         else:
-            label = matching_gt.iloc[0]["class_label"] # default to the first gt
+            label = matching_gt.iloc[0]["class_label"]  # default to the first gt

             # Hacky temporary fix
             # In the medical data, step 1 can cover the same frames as other steps,
@@ -134,7 +134,7 @@ def add_activity_gt_to_kwcoco(topic, task, dset, activity_config_fn):
                     f"Label: {label} is not in the activity labels config, ignoring"
                 )
                 print(f"LABEL: {label}, {type(label)}")
-                
+
                 label = "background"
                 activity_label = label
             else:
24 changes: 12 additions & 12 deletions angel_system/global_step_prediction/global_step_predictor.py
@@ -235,9 +235,9 @@ def initialize_new_recipe_tracker(self, recipe, config_fn=None):
             },
         )

-        tracker_dict["last_granular_step_per_broad_step"] = (
-            self.get_last_granular_step_per_broad_step(broad_steps)
-        )
+        tracker_dict[
+            "last_granular_step_per_broad_step"
+        ] = self.get_last_granular_step_per_broad_step(broad_steps)
         tracker_dict["recipe"] = recipe

         tracker_dict["current_broad_step"] = 0
@@ -254,9 +254,9 @@ def initialize_new_recipe_tracker(self, recipe, config_fn=None):
         tracker_dict["broad_step_to_activity_ids"] = [
             self.get_unique(step["activity_ids"]) for step in broad_steps
         ]
-        tracker_dict["granular_step_to_activity_id"] = (
-            self.get_activity_per_granular_step(broad_steps)
-        )
+        tracker_dict[
+            "granular_step_to_activity_id"
+        ] = self.get_activity_per_granular_step(broad_steps)

         # Labels
         tracker_dict["broad_step_to_label"] = [step["label"] for step in broad_steps]
@@ -300,9 +300,9 @@ def increment_granular_step(self, tracker_ind):

         if current_granular_step < num_granular_steps:
             self.trackers[tracker_ind]["current_granular_step"] += 1
-            self.trackers[tracker_ind]["current_broad_step"] = (
-                self.granular_to_broad_step(tracker, current_granular_step)
-            )
+            self.trackers[tracker_ind][
+                "current_broad_step"
+            ] = self.granular_to_broad_step(tracker, current_granular_step)
         elif current_granular_step == num_granular_steps and tracker["active"] == True:
             self.trackers[tracker_ind]["active"] = False
         else:
@@ -335,9 +335,9 @@ def decrement_granular_step(self, tracker_ind):

         if current_granular_step > 0:
             self.trackers[tracker_ind]["current_granular_step"] -= 1
-            self.trackers[tracker_ind]["current_broad_step"] = (
-                self.granular_to_broad_step(tracker, current_granular_step)
-            )
+            self.trackers[tracker_ind][
+                "current_broad_step"
+            ] = self.granular_to_broad_step(tracker, current_granular_step)
         else:
             raise Exception(
                 f"Tried to decrement tracker #{tracker_ind}: "
@@ -420,9 +420,9 @@ def _thread_populate_from_coco(self, input_coco_path: Path) -> None:
             # Creates [n_det, n_label] matrix, which we assign to and then
             # ravel into the message slot.
             conf_mat = np.zeros((n_dets, len(obj_labels)), dtype=np.float64)
-            conf_mat[np.arange(n_dets), image_annots.get("category_id")] = (
-                image_annots.get("confidence")
-            )
+            conf_mat[
+                np.arange(n_dets), image_annots.get("category_id")
+            ] = image_annots.get("confidence")
             det_msg.label_confidences.extend(conf_mat.ravel())

             # Calling the image callback last since image frames define the
@@ -602,7 +602,7 @@ def rt_loop(self):

             window = self._buffer.get_window(
                 self._window_size,
-                have_leading_object=self._window_lead_with_objects
+                have_leading_object=self._window_lead_with_objects,
             )

             # log.info(f"buffer contents: {window.obj_dets}")
@@ -762,11 +762,11 @@ def _process_window(self, window: InputWindow) -> ActivityDetection:
                feat_version=self._feat_version,
                image_width=self._img_pix_width,
                image_height=self._img_pix_height,
-                #feature_memo=memo_object_to_feats, # passed by reference so this gets updated in the function and changes persist here
-                #pose_memo=queued_pose_memo,
+                # feature_memo=memo_object_to_feats, # passed by reference so this gets updated in the function and changes persist here
+                # pose_memo=queued_pose_memo,
                normalize_pixel_pts=self.model_normalize_pixel_pts,
                normalize_center_pts=self.model_normalize_center_pts,
-                pose_repeat_rate=self._pose_repeat_rate
+                pose_repeat_rate=self._pose_repeat_rate,
            )
        except ValueError as ex:
            log.warn(f"object-to-feats: ValueError: {ex}")
34 changes: 21 additions & 13 deletions ros/angel_utils/python/angel_utils/activity_classification.py
@@ -63,14 +63,20 @@ def __repr__(self):
         timestamp frame and correlated data **lower** in the table (higher
         index). This order is arbitrary.
         """
-        return repr(pd.DataFrame(
-            data={
-                "frames": [time_to_int(f[0]) for f in self.frames],
-                "detections": [(d.num_detections if d else None) for d in self.obj_dets],
-                "poses": [(len(p.joints) if p else None) for p in self.patient_joint_kps],
-            },
-            dtype=pd.Int64Dtype,
-        ))
+        return repr(
+            pd.DataFrame(
+                data={
+                    "frames": [time_to_int(f[0]) for f in self.frames],
+                    "detections": [
+                        (d.num_detections if d else None) for d in self.obj_dets
+                    ],
+                    "poses": [
+                        (len(p.joints) if p else None) for p in self.patient_joint_kps
+                    ],
+                },
+                dtype=pd.Int64Dtype,
+            )
+        )


 # TODO: A more generic version of InputBuffer
@@ -337,11 +343,13 @@ def get_window(
         # starting.
         # Finally, the extracted slice is reversed in order again so that
         # the final list is in temporally ascending order.
-        window_frames = list(itertools.islice(
-            reversed(self.frames),
-            window_frame_start_idx,
-            window_size + window_frame_start_idx
-        ))[::-1]
+        window_frames = list(
+            itertools.islice(
+                reversed(self.frames),
+                window_frame_start_idx,
+                window_size + window_frame_start_idx,
+            )
+        )[::-1]
         window_frame_times: List[Time] = [wf[0] for wf in window_frames]
         window_frame_times_ns: List[int] = [
             time_to_int(wft) for wft in window_frame_times
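This change is layout-only, but the comment above it describes a real trick: the frame buffer is a deque, which does not support slicing, so the newest `window_size` entries are pulled via `itertools.islice` over `reversed(...)` and then reversed back into ascending time order. A runnable sketch (the integer frames and the `window_frame_start_idx` value are hypothetical stand-ins for the node's frame tuples):

```python
# Minimal sketch of the windowing trick in get_window: a deque has no
# slicing, so take the newest items via islice over reversed(...), then
# flip the slice back into ascending order.
# The integer "frames" are hypothetical stand-ins for frame tuples.
import itertools
from collections import deque

frames = deque(range(10))  # 0 (oldest) .. 9 (newest)
window_size = 3
window_frame_start_idx = 1  # hypothetical: skip the single newest frame

window_frames = list(
    itertools.islice(
        reversed(frames),
        window_frame_start_idx,
        window_size + window_frame_start_idx,
    )
)[::-1]
print(window_frames)  # -> [6, 7, 8]
```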
18 changes: 9 additions & 9 deletions ros/angel_utils/scripts/bag_extractor.py
@@ -153,19 +153,19 @@ def __init__(self):
         if self.extract_head_pose_data or self.extract_depth_head_pose_data:
             self.msg_type_to_handler_map[HeadsetPoseData] = self.handle_head_pose_msg
         if self.extract_hand_pose_data:
-            self.msg_type_to_handler_map[HandJointPosesUpdate] = (
-                self.handle_hand_pose_msg
-            )
+            self.msg_type_to_handler_map[
+                HandJointPosesUpdate
+            ] = self.handle_hand_pose_msg
         if self.extract_spatial_map_data:
             self.msg_type_to_handler_map[SpatialMesh] = self.handle_spatial_mesh_msg
         if self.extract_annotation_event_data:
-            self.msg_type_to_handler_map[AnnotationEvent] = (
-                self.handle_annotation_event_msg
-            )
+            self.msg_type_to_handler_map[
+                AnnotationEvent
+            ] = self.handle_annotation_event_msg
         if self.extract_activity_detection_data:
-            self.msg_type_to_handler_map[ActivityDetection] = (
-                self.handle_activity_detection_msg
-            )
+            self.msg_type_to_handler_map[
+                ActivityDetection
+            ] = self.handle_activity_detection_msg
         if self.extract_task_update_data:
             self.msg_type_to_handler_map[TaskUpdate] = self.handle_task_update_msg
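These hunks reformat entries in the extractor's message-type-to-handler dispatch table: each ROS message class maps to the method that handles it, so the bag reader can route any message with a single dictionary lookup. A minimal sketch of the pattern (the empty classes and print-based handlers are hypothetical stand-ins for the real ROS message types and node methods):

```python
# Sketch of the dispatch-table pattern used in bag_extractor: map each
# message type to its handler, then look handlers up by type at runtime.
# The message classes and handlers here are hypothetical stand-ins.
from typing import Any, Callable, Dict, Type

class HandJointPosesUpdate: ...
class AnnotationEvent: ...

def handle_hand_pose_msg(msg: Any) -> None:
    print("hand pose:", type(msg).__name__)

def handle_annotation_event_msg(msg: Any) -> None:
    print("annotation:", type(msg).__name__)

msg_type_to_handler_map: Dict[Type, Callable[[Any], None]] = {}
msg_type_to_handler_map[HandJointPosesUpdate] = handle_hand_pose_msg
msg_type_to_handler_map[AnnotationEvent] = handle_annotation_event_msg

for msg in (HandJointPosesUpdate(), AnnotationEvent()):
    msg_type_to_handler_map[type(msg)](msg)
```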
