8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -23,7 +23,7 @@ ci:
repos:
# Standard hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v5.0.0
+ rev: v6.0.0
hooks:
- id: check-added-large-files
exclude: ^imcui/third_party/
@@ -47,7 +47,7 @@ repos:
exclude: ^imcui/third_party/

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.8.4"
rev: "v0.14.11"
hooks:
- id: ruff
args: ["--fix", "--show-fixes", "--extend-ignore=E402"]
@@ -56,7 +56,7 @@ repos:

# Checking static types
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.14.0"
rev: "v1.19.1"
hooks:
- id: mypy
files: "setup.py"
@@ -82,7 +82,7 @@ repos:

# Suggested hook if you add a .clang-format file
- repo: https://github.com/pre-commit/mirrors-clang-format
- rev: v13.0.0
+ rev: v21.1.8
hooks:
- id: clang-format
exclude: ^imcui/third_party/
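
Note for reviewers: these four rev bumps are exactly the kind of change pre-commit autoupdate produces, moving each pin to the hook repo's latest tag. The Python churn in the files below then follows from the newer ruff format, which (in its newer style) joins implicit string concatenations that fit on one line and formats the expressions inside f-strings.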
2 changes: 1 addition & 1 deletion imcui/hloc/extract_features.py
@@ -181,7 +181,7 @@ def main(
overwrite: bool = False,
) -> Path:
logger.info(
"Extracting local features with configuration:" f"\n{pprint.pformat(conf)}"
f"Extracting local features with configuration:\n{pprint.pformat(conf)}"
)

dataset = ImageDataset(image_dir, conf["preprocessing"], image_list)
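For anyone unfamiliar with the pattern: the old line glued a plain literal and an f-string together via implicit concatenation; the new one is a single f-string with identical output. A minimal sketch (the conf dict is a stand-in, not this repo's real config):

    import pprint

    conf = {"output": "feats-example"}  # illustrative value only
    # Before: two adjacent literals, concatenated at compile time.
    old = "Extracting local features with configuration:" f"\n{pprint.pformat(conf)}"
    # After: one f-string; same text, and nothing for ruff's
    # implicit-concat rule (ISC001) to flag.
    new = f"Extracting local features with configuration:\n{pprint.pformat(conf)}"
    assert old == new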
4 changes: 2 additions & 2 deletions imcui/hloc/extractors/dog.py
@@ -40,7 +40,7 @@ def _init(self, conf):
elif conf["descriptor"] == "hardnet":
self.describe = kornia.feature.HardNet(pretrained=True)
elif conf["descriptor"] not in ["sift", "rootsift"]:
- raise ValueError(f'Unknown descriptor: {conf["descriptor"]}')
+ raise ValueError(f"Unknown descriptor: {conf['descriptor']}")

self.sift = None # lazily instantiated on the first image
self.dummy_param = torch.nn.Parameter(torch.empty(0))
@@ -104,7 +104,7 @@ def _forward(self, data):
patches[start_idx:end_idx]
)
else:
- raise ValueError(f'Unknown descriptor: {self.conf["descriptor"]}')
+ raise ValueError(f"Unknown descriptor: {self.conf['descriptor']}")

keypoints = torch.from_numpy(keypoints[:, :2]) # keep only x, y
scales = torch.from_numpy(scales)
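The quote flip in both hunks is formatter canonicalization: double quotes outside, single quotes inside the replacement field. Before Python 3.12 an f-string cannot reuse its own quote character inside the braces, so one of the two kinds has to differ; the rendered string is the same either way. Sketch with a stand-in conf:

    conf = {"descriptor": "hardnet"}  # illustrative value only
    old = f'Unknown descriptor: {conf["descriptor"]}'
    new = f"Unknown descriptor: {conf['descriptor']}"
    assert old == new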
2 changes: 1 addition & 1 deletion imcui/hloc/extractors/example.py
@@ -26,7 +26,7 @@ class Example(BaseModel):

def _init(self, conf):
# set checkpoints paths if needed
- model_path = example_path / "checkpoints" / f'{conf["model_name"]}'
+ model_path = example_path / "checkpoints" / f"{conf['model_name']}"
if not model_path.exists():
logger.info(f"No model found at {model_path}")

2 changes: 1 addition & 1 deletion imcui/hloc/extractors/sift.py
@@ -132,7 +132,7 @@ def _init(self, conf):
else:
backends = {"opencv", "pycolmap", "pycolmap_cpu", "pycolmap_cuda"}
raise ValueError(
f"Unknown backend: {backend} not in " f"{{{','.join(backends)}}}."
f"Unknown backend: {backend} not in {{{','.join(backends)}}}."
)
logger.info("Load SIFT model done.")

10 changes: 5 additions & 5 deletions imcui/hloc/match_dense.py
@@ -495,7 +495,7 @@ def match_and_assign(

# Invalidate matches that are far from selected bin by reassignment
if max_kps is not None:
- logger.info(f'Reassign matches with max_error={conf["max_error"]}.')
+ logger.info(f"Reassign matches with max_error={conf['max_error']}.")
assign_matches(pairs, match_path, cpdict, max_error=conf["max_error"])


@@ -737,7 +737,7 @@ def main(
overwrite: bool = False,
) -> Path:
logger.info(
"Extracting semi-dense features with configuration:" f"\n{pprint.pformat(conf)}"
f"Extracting semi-dense features with configuration:\n{pprint.pformat(conf)}"
)

if features is None:
@@ -747,17 +747,17 @@ def main(
features_q = features
if matches is None:
raise ValueError(
"Either provide both features and matches as Path" " or both as names."
"Either provide both features and matches as Path or both as names."
)
else:
if export_dir is None:
raise ValueError(
"Provide an export_dir if features and matches"
f" are not file paths: {features}, {matches}."
)
- features_q = Path(export_dir, f'{features}{conf["output"]}.h5')
+ features_q = Path(export_dir, f"{features}{conf['output']}.h5")
if matches is None:
- matches = Path(export_dir, f'{conf["output"]}_{pairs.stem}.h5')
+ matches = Path(export_dir, f"{conf['output']}_{pairs.stem}.h5")

if features_ref is None:
features_ref = []
10 changes: 4 additions & 6 deletions imcui/hloc/match_features.py
@@ -96,16 +96,16 @@ def main(
features_q = features
if matches is None:
raise ValueError(
"Either provide both features and matches as Path" " or both as names."
"Either provide both features and matches as Path or both as names."
)
else:
if export_dir is None:
raise ValueError(
"Provide an export_dir if features is not" f" a file path: {features}."
f"Provide an export_dir if features is not a file path: {features}."
)
features_q = Path(export_dir, features + ".h5")
if matches is None:
- matches = Path(export_dir, f'{features}_{conf["output"]}_{pairs.stem}.h5')
+ matches = Path(export_dir, f"{features}_{conf['output']}_{pairs.stem}.h5")

if features_ref is None:
features_ref = features_q
@@ -146,9 +146,7 @@ def match_from_paths(
feature_path_ref: Path,
overwrite: bool = False,
) -> Path:
- logger.info(
- "Matching local features with configuration:" f"\n{pprint.pformat(conf)}"
- )
+ logger.info(f"Matching local features with configuration:\n{pprint.pformat(conf)}")

if not feature_path_q.exists():
raise FileNotFoundError(f"Query feature file {feature_path_q}.")
2 changes: 1 addition & 1 deletion imcui/hloc/pipelines/4Seasons/utils.py
@@ -227,5 +227,5 @@ def evaluate_submission(submission_dir, relocs, ths=[0.1, 0.2, 0.5]):
recall = [np.mean(error <= th) for th in ths]
s = f"Relocalization evaluation {submission_dir.name}/{reloc.name}\n"
s += " / ".join([f"{th:>7}m" for th in ths]) + "\n"
s += " / ".join([f"{100*r:>7.3f}%" for r in recall])
s += " / ".join([f"{100 * r:>7.3f}%" for r in recall])
logger.info(s)
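
This hunk (and several below) only adds whitespace around operators inside replacement fields, since newer ruff format treats the embedded expression as ordinary code. The format spec after the colon is untouched, so the output is byte-for-byte identical:

    r = 0.987654  # illustrative recall value
    assert f"{100*r:>7.3f}%" == f"{100 * r:>7.3f}%"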
6 changes: 3 additions & 3 deletions imcui/hloc/pipelines/7Scenes/create_gt_sfm.py
@@ -111,9 +111,9 @@ def correct_sfm_with_gt_depth(sfm_path, depth_folder_path, output_path):
new_p3D_ids[new_p3D_ids != -1] = sub_p3D_ids
img = img._replace(point3D_ids=new_p3D_ids)

- assert len(img.point3D_ids[img.point3D_ids != -1]) == len(
- scs
- ), f"{len(scs)}, {len(img.point3D_ids[img.point3D_ids != -1])}"
+ assert len(img.point3D_ids[img.point3D_ids != -1]) == len(scs), (
+ f"{len(scs)}, {len(img.point3D_ids[img.point3D_ids != -1])}"
+ )
for i, p3did in enumerate(img.point3D_ids[img.point3D_ids != -1]):
points3D[p3did] = points3D[p3did]._replace(xyz=scs[i])
images[imgid] = img
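The assert reflow matches the newer formatter style: keep the comparison intact on one line and parenthesize the message, instead of splitting the condition itself. Shape of the change, with stand-in data:

    import numpy as np

    point3D_ids = np.array([7, -1, 9, 12])
    scs = [None] * 3  # illustrative: one entry per valid 3D point
    valid = point3D_ids[point3D_ids != -1]
    assert len(valid) == len(scs), (
        f"{len(scs)}, {len(valid)}"
    )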
2 changes: 1 addition & 1 deletion imcui/hloc/pipelines/Cambridge/utils.py
@@ -141,5 +141,5 @@ def evaluate(model, results, list_file=None, ext=".bin", only_localized=False):
threshs_R = [1.0, 2.0, 3.0, 5.0, 2.0, 5.0, 10.0]
for th_t, th_R in zip(threshs_t, threshs_R):
ratio = np.mean((errors_t < th_t) & (errors_R < th_R))
out += f"\n\t{th_t*100:.0f}cm, {th_R:.0f}deg : {ratio*100:.2f}%"
out += f"\n\t{th_t * 100:.0f}cm, {th_R:.0f}deg : {ratio * 100:.2f}%"
logger.info(out)
4 changes: 1 addition & 3 deletions imcui/hloc/reconstruction.py
@@ -92,9 +92,7 @@ def run_reconstruction(
largest_index = index
largest_num_images = num_images
assert largest_index is not None
- logger.info(
- f"Largest model is #{largest_index} " f"with {largest_num_images} images."
- )
+ logger.info(f"Largest model is #{largest_index} with {largest_num_images} images.")

for filename in ["images.bin", "cameras.bin", "points3D.bin"]:
if (sfm_dir / filename).exists():
2 changes: 1 addition & 1 deletion imcui/hloc/triangulation.py
@@ -283,7 +283,7 @@ def parse_option_args(args: List[str], default_options) -> Dict[str, Any]:
target_type = type(getattr(default_options, key))
if not isinstance(value, target_type):
raise ValueError(
- f'Incorrect type for option "{key}":' f" {type(value)} vs {target_type}"
+ f'Incorrect type for option "{key}": {type(value)} vs {target_type}'
)
options[key] = value
return options
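One nuance: unlike the earlier hunks, the outer quotes stay single here, because the message text itself contains double quotes around {key}; only the implicit concatenation was merged. With stand-in values:

    key, value = "num_threads", "8"  # illustrative
    target_type = int
    msg = f'Incorrect type for option "{key}": {type(value)} vs {target_type}'
    # -> Incorrect type for option "num_threads": <class 'str'> vs <class 'int'>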
14 changes: 7 additions & 7 deletions imcui/ui/utils.py
@@ -757,7 +757,7 @@ def run_ransac(
ransac_confidence=ransac_confidence,
ransac_max_iter=ransac_max_iter,
)
logger.info(f"RANSAC matches done using: {time.time()-t1:.3f}s")
logger.info(f"RANSAC matches done using: {time.time() - t1:.3f}s")
t1 = time.time()

# plot images with ransac matches
@@ -768,7 +768,7 @@ def run_ransac(
output_matches_ransac, num_matches_ransac = display_matches(
state_cache, titles=titles, tag="KPTS_RANSAC"
)
logger.info(f"Display matches done using: {time.time()-t1:.3f}s")
logger.info(f"Display matches done using: {time.time() - t1:.3f}s")
t1 = time.time()

# compute warp images
@@ -920,7 +920,7 @@ def run_matching(
logger.info(f"Loaded cached model {cache_key}")
else:
matcher = get_model(match_conf)
logger.info(f"Loading model using: {time.time()-t0:.3f}s")
logger.info(f"Loading model using: {time.time() - t0:.3f}s")
t1 = time.time()
yield generate_fake_outputs(
output_keypoints, output_matches_raw, output_matches_ransac, match_conf, {}, {}
@@ -979,7 +979,7 @@ def run_matching(
# gr.Info(
# f"Matching images done using: {time.time()-t1:.3f}s",
# )
logger.info(f"Matching images done using: {time.time()-t1:.3f}s")
logger.info(f"Matching images done using: {time.time() - t1:.3f}s")
t1 = time.time()

# plot images with keypoints
@@ -1022,7 +1022,7 @@
)

# gr.Info(f"RANSAC matches done using: {time.time()-t1:.3f}s")
logger.info(f"RANSAC matches done using: {time.time()-t1:.3f}s")
logger.info(f"RANSAC matches done using: {time.time() - t1:.3f}s")
t1 = time.time()

# plot images with ransac matches
@@ -1043,7 +1043,7 @@ def run_matching(
)

# gr.Info(f"Display matches done using: {time.time()-t1:.3f}s")
logger.info(f"Display matches done using: {time.time()-t1:.3f}s")
logger.info(f"Display matches done using: {time.time() - t1:.3f}s")
t1 = time.time()
# plot wrapped images
output_wrapped, warped_image = generate_warp_images(
@@ -1054,7 +1054,7 @@ def run_matching(
)
plt.close("all")
# gr.Info(f"In summary, total time: {time.time()-t0:.3f}s")
logger.info(f"TOTAL time: {time.time()-t0:.3f}s")
logger.info(f"TOTAL time: {time.time() - t0:.3f}s")

state_cache = pred
state_cache["num_matches_raw"] = num_matches_raw
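
Since every Python hunk in this PR is formatter output with no behavioral change, a reasonable sanity check is to run ruff format --check . and ruff check . with the newly pinned version and confirm both come back clean.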