
Commit 43caaa7

formatting.

1 parent 2f81b57 commit 43caaa7

19 files changed, +54 -64 lines
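Every hunk below is a mechanical reformat. Most follow the power-operator rule of recent Black releases (22.1.0 and later), which removes the spaces around ** when both operands are simple (names, literals, or attribute chains) and keeps them otherwise; the remaining hunks collapse expressions that now fit on fewer lines. A minimal sketch of the power-operator rule (the variable names are illustrative only, not from the diff):

    x = a**2 + b**0.5   # both operands simple: spaces removed
    y = (a + b) ** 2    # parenthesized operand is compound: spaces kept
    z = 2 ** f(n)       # call operand is compound: spaces kept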

limap/base/unit_test.py (+2 -2)

@@ -34,10 +34,10 @@ def report_error(imagecols_pred, imagecols):
         R_error = (
             imagecols_pred.camimage(img_id).R() - imagecols.camimage(img_id).R()
         )
-        R_error = np.sqrt(np.sum(R_error ** 2))
+        R_error = np.sqrt(np.sum(R_error**2))
         T_error = (
             imagecols_pred.camimage(img_id).T() - imagecols.camimage(img_id).T()
         )
-        T_error = np.sqrt(np.sum(T_error ** 2))
+        T_error = np.sqrt(np.sum(T_error**2))
         pose_errors.append(np.array([R_error, T_error]))
     print("pose_error: (R, T)", np.array(pose_errors).mean(0))

limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py (+6 -9)

@@ -110,15 +110,12 @@ def _pl_estimate_absolute_pose(
     ransac_options.data_type_weights_ = np.array(
         [ransac_cfg["weight_point"], ransac_cfg["weight_line"]]
     )
-    ransac_options.data_type_weights_ *= (
-        np.array(
-            [
-                ransac_options.squared_inlier_thresholds_[1],
-                ransac_options.squared_inlier_thresholds_[0],
-            ]
-        )
-        / np.sum(ransac_options.squared_inlier_thresholds_)
-    )
+    ransac_options.data_type_weights_ *= np.array(
+        [
+            ransac_options.squared_inlier_thresholds_[1],
+            ransac_options.squared_inlier_thresholds_[0],
+        ]
+    ) / np.sum(ransac_options.squared_inlier_thresholds_)
     ransac_options.min_num_iterations_ = ransac_cfg["min_num_iterations"]
     ransac_options.final_least_squares_ = ransac_cfg["final_least_squares"]

limap/features/models/s2dnet.py (+2 -2)

@@ -66,7 +66,7 @@ def print_gpu_memory():
     a = torch.cuda.memory_allocated(0)
     f = r - a  # free inside reserved

-    print(np.array([t, r, a, f]) / 2 ** 30)
+    print(np.array([t, r, a, f]) / 2**30)


 class AdapLayers(nn.Module):
@@ -130,7 +130,7 @@ def _init(self, conf):
             if isinstance(layer, torch.nn.MaxPool2d):
                 current_scale += 1
             if i in self.hypercolumn_indices:
-                self.scales.append(2 ** current_scale)
+                self.scales.append(2**current_scale)

         self.adaptation_layers = AdapLayers(
             conf.hypercolumn_layers, conf.output_dim

limap/features/models/vggnet.py (+1 -1)

@@ -31,7 +31,7 @@ def _init(self, conf=default_conf):
             if isinstance(layer, torch.nn.MaxPool2d):
                 current_scale += 1
             if i in self.hypercolumn_indices:
-                self.scales.append(2 ** current_scale)
+                self.scales.append(2**current_scale)

     def _forward(self, data):
         image = data  # data['image']

limap/line2d/LineTR/line_attention.py (+1 -1)

@@ -38,7 +38,7 @@ def __init__(self, n_heads: int, d_feature: int, dropout=0.1):
         self.w_vs = nn.Linear(d_feature, n_heads * dim, bias=True)
         self.fc = nn.Linear(n_heads * dim, d_feature, bias=True)

-        self.attention = ScaledDotProduct(scale=dim ** 0.5)
+        self.attention = ScaledDotProduct(scale=dim**0.5)

         self.dropout = nn.Dropout(dropout)
         self.layer_norm = nn.LayerNorm(d_feature, eps=1e-6)

limap/line2d/LineTR/line_process.py (+4 -6)

@@ -54,7 +54,7 @@ def point_on_line(line, dist_px):
     vec = ep - sp
     if vec[0] != 0:
         m = vec[1] / vec[0]
-        x = np.sqrt(dist_px ** 2 / (1 + m ** 2))
+        x = np.sqrt(dist_px**2 / (1 + m**2))
         y = m * x
     else:
         x = 0
@@ -94,10 +94,8 @@ def remove_borders(
     sp = np.floor(klines[:, 0]).astype(int)
     ep = np.floor(klines[:, 1]).astype(int)
     valid_mask_given = (
-        (
-            valid_mask_given[sp[:, 1], sp[:, 0]]
-            + valid_mask_given[ep[:, 1], ep[:, 0]]
-        )
+        valid_mask_given[sp[:, 1], sp[:, 0]]
+        + valid_mask_given[ep[:, 1], ep[:, 0]]
     ).astype(bool)
     valid_mask = valid_mask & valid_mask_given

@@ -275,7 +273,7 @@ def change_cv2_T_np(klines_cv):
         kline_ep = [sp_x, sp_y]

         # linelength = math.sqrt((kline_ep[0]-kline_sp[0])**2 +(kline_ep[1]-kline_sp[1])**2)
-        linelength = line.lineLength * (2 ** line.octave)
+        linelength = line.lineLength * (2**line.octave)

         klines_sp.append(kline_sp)
         klines_ep.append(kline_ep)

limap/line2d/LineTR/line_transformer.py (+1 -1)

@@ -186,7 +186,7 @@ def forward(
 def attention(query, key, value):
     dim = query.shape[1]
     scores = (
-        torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
+        torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
     )  # [3, 64, 4, 512] -> [3, 4, 512, 512]
     prob = torch.nn.functional.softmax(scores, dim=-1)
     return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob
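Side note, not part of the diff: the einsum line reformatted here (and its twin in limap/point2d/superglue/superglue.py further down) is standard scaled dot-product attention with d = query.shape[1]:

$$\mathrm{Attention}(Q, K, V) = \operatorname{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d}}\right)V$$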

limap/line2d/LineTR/linetr_pipeline.py (+11 -13)

@@ -168,7 +168,7 @@ def process_siamese(data, i):
                 pred["keypoints0"],
                 pred["keypoints1"],
                 **data,
-                pos_th=self.conf.ground_truth.th_positive
+                pos_th=self.conf.ground_truth.th_positive,
             )
             pred["gt_assignment"] = assignment
             pred["gt_matches0"], pred["gt_matches1"] = m0, m1
@@ -188,7 +188,7 @@ def process_siamese(data, i):
                 pred["lines0"].reshape(b_size, -1, 2),
                 pred["lines1"].reshape(b_size, -1, 2),
                 **data,
-                pos_th=self.conf.ground_truth.th_positive
+                pos_th=self.conf.ground_truth.th_positive,
             )
             pred["samples_gt_assignment"] = samples_assignment
             pred["samples_gt_matches0"] = samples_m0
@@ -219,7 +219,7 @@ def process_siamese(data, i):
                 pred["keypoints1"],
                 **data,
                 pos_th=self.conf.ground_truth.th_positive,
-                neg_th=self.conf.ground_truth.th_negative
+                neg_th=self.conf.ground_truth.th_negative,
             )
             pred["gt_assignment"] = assignment
             pred["gt_matches0"], pred["gt_matches1"] = m0, m1
@@ -244,10 +244,8 @@ def process_siamese(data, i):
                 pred["lines1"].reshape(b_size, -1, 2),
                 **data,
                 pos_th=self.conf.ground_truth.th_positive,
-                neg_th=self.conf.ground_truth.th_negative
-            )[
-                :3
-            ]
+                neg_th=self.conf.ground_truth.th_negative,
+            )[:3]
             pred["samples_gt_assignment"] = samples_assignment
             pred["samples_gt_matches0"] = samples_m0
             pred["samples_gt_matches1"] = samples_m1
@@ -308,13 +306,13 @@ def process_siamese(data, i):
         assert match_mat.shape[0] == 1
         bool_match_mat = match_mat[0] > 0
         pred["line_matches0"] = np.argmax(bool_match_mat, axis=1)
-        pred["line_matches0"][
-            ~np.any(bool_match_mat, axis=1)
-        ] = UNMATCHED_FEATURE
+        pred["line_matches0"][~np.any(bool_match_mat, axis=1)] = (
+            UNMATCHED_FEATURE
+        )
         pred["line_matches1"] = np.argmax(bool_match_mat, axis=0)
-        pred["line_matches1"][
-            ~np.any(bool_match_mat, axis=0)
-        ] = UNMATCHED_FEATURE
+        pred["line_matches1"][~np.any(bool_match_mat, axis=0)] = (
+            UNMATCHED_FEATURE
+        )
         pred["line_matches0"] = torch.from_numpy(pred["line_matches0"])[None]
         pred["line_matches1"] = torch.from_numpy(pred["line_matches1"])[None]
         lmatch_scores = torch.from_numpy(

limap/line2d/SOLD2/misc/visualize_util.py (+1 -1)

@@ -1,4 +1,4 @@
-""" Organize some frequently used visualization functions. """
+"""Organize some frequently used visualization functions."""

 import cv2
 import numpy as np

limap/line2d/SOLD2/model/line_detection.py (+2 -4)

@@ -178,9 +178,7 @@ def detect(self, junctions, heatmap, device=torch.device("cpu")):
                 dim=-1,
             )
         )
-        normalized_seg_length = segments_length / (
-            ((H ** 2) + (W ** 2)) ** 0.5
-        )
+        normalized_seg_length = segments_length / (((H**2) + (W**2)) ** 0.5)

         # Perform local max search
         num_cand = cand_h.shape[0]
@@ -552,7 +550,7 @@ def detect_local_max(
         """Detection by local maximum search."""
         # Compute the distance threshold
         dist_thresh = (
-            0.5 * (2 ** 0.5) + self.lambda_radius * normalized_seg_length
+            0.5 * (2**0.5) + self.lambda_radius * normalized_seg_length
         )
         # Make it N x 64
         dist_thresh = torch.repeat_interleave(

limap/line2d/SOLD2/model/loss.py (+1 -1)

@@ -154,7 +154,7 @@ def space_to_depth(input_tensor, grid_size):
     # (N, bs, bs, C, H//bs, W//bs)
     x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
     # (N, C*bs^2, H//bs, W//bs)
-    x = x.view(N, C * (grid_size ** 2), H // grid_size, W // grid_size)
+    x = x.view(N, C * (grid_size**2), H // grid_size, W // grid_size)
     return x
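A quick shape check for the hunk above (a hypothetical call; assumes the (N, C, H, W) layout noted in the comments):

    import torch
    x = torch.zeros(2, 1, 64, 64)   # (N, C, H, W)
    y = space_to_depth(x, 8)        # each 8x8 pixel block moves into channels
    print(y.shape)                  # torch.Size([2, 64, 8, 8]): C * 8**2 = 64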

limap/line2d/SOLD2/train.py (+2 -2)

@@ -373,7 +373,7 @@ def train_single_epoch(
     results = metric_func.metric_results
     average = average_meter.average()
     # Get gpu memory usage in GB
-    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
+    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
     if compute_descriptors:
         print(
             "Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), descriptor_loss=%.4f (%.4f), gpu_mem=%.4fGB"
@@ -734,7 +734,7 @@ def record_train_summaries(writer, global_step, scalars, images):

     # GPU memory part
     # Get gpu memory usage in GB
-    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
+    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
     writer.add_scalar("GPU/GPU_memory_usage", gpu_mem_usage, global_step)

     # Loss part

limap/line2d/line_utils/merge_lines.py (+2 -2)

@@ -104,8 +104,8 @@ def merge_line_cluster(lines):
     if b == 0:
         u = np.array([1, 0]) if a >= c else np.array([0, 1])
     else:
-        m = (c - a + np.sqrt((a - c) ** 2 + 4 * b ** 2)) / (2 * b)
-        u = np.array([1, m]) / np.sqrt(1 + m ** 2)
+        m = (c - a + np.sqrt((a - c) ** 2 + 4 * b**2)) / (2 * b)
+        u = np.array([1, m]) / np.sqrt(1 + m**2)

     # Get the center of gravity of all endpoints
     cross = np.mean(points, axis=0)
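Context for the hunk above: reading a, b, c as the entries of the symmetric 2x2 endpoint scatter matrix [[a, b], [b, c]] (an assumption based on the surrounding function), the two reformatted lines compute the unit eigenvector for its largest eigenvalue:

$$m = \frac{c - a + \sqrt{(a - c)^2 + 4b^2}}{2b}, \qquad u = \frac{(1,\, m)}{\sqrt{1 + m^2}}$$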

limap/optimize/line_localization/functions.py (+4 -3)

@@ -113,9 +113,10 @@ def reprojection_filter_matches_2to3(
     best_id = None
     for id in track_ids:
         l3d = linetracks[id].line
-        l2d_start, l2d_end = ref_camview.projection(
-            l3d.start
-        ), ref_camview.projection(l3d.end)
+        l2d_start, l2d_end = (
+            ref_camview.projection(l3d.start),
+            ref_camview.projection(l3d.end),
+        )
         l2d = _base.Line2d(l2d_start, l2d_end)

         dist = dist_func(ref_line, l2d)

limap/point2d/superglue/superglue.py (+5 -4)

@@ -91,7 +91,7 @@ def attention(
     query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
 ) -> Tuple[torch.Tensor, torch.Tensor]:
     dim = query.shape[1]
-    scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
+    scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
     prob = torch.nn.functional.softmax(scores, dim=-1)
     return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob

@@ -336,9 +336,10 @@ def log_optimal_transport(
         return Z

     def _get_matches(self, scores_mat):
-        max0, max1 = scores_mat[:, :-1, :-1].max(2), scores_mat[
-            :, :-1, :-1
-        ].max(1)
+        max0, max1 = (
+            scores_mat[:, :-1, :-1].max(2),
+            scores_mat[:, :-1, :-1].max(1),
+        )
         m0, m1 = max0.indices, max1.indices
         mutual0 = arange_like(m0, 1)[None] == m1.gather(1, m0)
         mutual1 = arange_like(m1, 1)[None] == m0.gather(1, m1)

limap/pointsfm/database.py (+2 -4)

@@ -38,7 +38,7 @@

 IS_PYTHON3 = sys.version_info[0] >= 3

-MAX_IMAGE_ID = 2 ** 31 - 1
+MAX_IMAGE_ID = 2**31 - 1

 CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras (
     camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
@@ -68,9 +68,7 @@
     prior_tz REAL,
     CONSTRAINT image_id_check CHECK(image_id >= 0 and image_id < {}),
     FOREIGN KEY(camera_id) REFERENCES cameras(camera_id))
-""".format(
-    MAX_IMAGE_ID
-)
+""".format(MAX_IMAGE_ID)

 CREATE_TWO_VIEW_GEOMETRIES_TABLE = """
 CREATE TABLE IF NOT EXISTS two_view_geometries (
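Context: this module mirrors COLMAP's database.py, where MAX_IMAGE_ID bounds image ids so that a pair of them packs into a single integer pair id. A sketch of that packing, following the COLMAP reference implementation (the helper's presence in this exact file is an assumption):

    def image_ids_to_pair_id(image_id1, image_id2):
        # order-independent: always put the smaller id in the high part
        if image_id1 > image_id2:
            image_id1, image_id2 = image_id2, image_id1
        return image_id1 * (MAX_IMAGE_ID + 1) + image_id2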

limap/util/geometry.py (+3 -3)

@@ -48,13 +48,13 @@ def rotation_from_quaternion(quad):
     quad = quad / norm
     qr, qi, qj, qk = quad[0], quad[1], quad[2], quad[3]
     rot_mat = np.zeros((3, 3))
-    rot_mat[0, 0] = 1 - 2 * (qj ** 2 + qk ** 2)
+    rot_mat[0, 0] = 1 - 2 * (qj**2 + qk**2)
     rot_mat[0, 1] = 2 * (qi * qj - qk * qr)
     rot_mat[0, 2] = 2 * (qi * qk + qj * qr)
     rot_mat[1, 0] = 2 * (qi * qj + qk * qr)
-    rot_mat[1, 1] = 1 - 2 * (qi ** 2 + qk ** 2)
+    rot_mat[1, 1] = 1 - 2 * (qi**2 + qk**2)
     rot_mat[1, 2] = 2 * (qj * qk - qi * qr)
     rot_mat[2, 0] = 2 * (qi * qk - qj * qr)
     rot_mat[2, 1] = 2 * (qj * qk + qi * qr)
-    rot_mat[2, 2] = 1 - 2 * (qi ** 2 + qj ** 2)
+    rot_mat[2, 2] = 1 - 2 * (qi**2 + qj**2)
     return rot_mat
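For reference, the reformatted entries match the standard rotation matrix of a unit quaternion q = (q_r, q_i, q_j, q_k) in the Hamilton convention:

$$R = \begin{pmatrix} 1 - 2(q_j^2 + q_k^2) & 2(q_i q_j - q_k q_r) & 2(q_i q_k + q_j q_r) \\ 2(q_i q_j + q_k q_r) & 1 - 2(q_i^2 + q_k^2) & 2(q_j q_k - q_i q_r) \\ 2(q_i q_k - q_j q_r) & 2(q_j q_k + q_i q_r) & 1 - 2(q_i^2 + q_j^2) \end{pmatrix}$$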

runners/hypersim/Hypersim.py (+3 -2)

@@ -95,8 +95,9 @@ def set_max_dim(cls, max_dim):

     @classmethod
     def set_resize_ratio(cls, ratio):
-        cls.h, cls.w = int(round(cls.default_h * ratio)), int(
-            round(cls.default_w * ratio)
+        cls.h, cls.w = (
+            int(round(cls.default_h * ratio)),
+            int(round(cls.default_w * ratio)),
         )
         cls.K[0, :] = cls.default_K[0, :] * cls.w / cls.default_w
         cls.K[1, :] = cls.default_K[1, :] * cls.h / cls.default_h

runners/inloc/localization.py (+1 -3)

@@ -98,9 +98,7 @@ def parse_config():
     # Output folder for LIMAP linetracks (in tmp)
     if cfg["output_folder"] is None:
         cfg["output_folder"] = "finaltracks"
-    cfg[
-        "inloc_dataset"
-    ] = (
+    cfg["inloc_dataset"] = (
         args.dataset
     )  # For reading camera poses for estimating 3D lines fron depth
     return cfg, args
