62 changes: 54 additions & 8 deletions GCPMarkerAdditions/ImportMarkerFeatures.py
@@ -1,11 +1,12 @@
__version__ = "1.0"
__version__ = ""

from meshroom.core import desc

import os
import csv
import json
import struct
import itertools

class ImportMarkerFeatures(desc.Node):
category = 'Utils'
@@ -50,11 +51,18 @@ class ImportMarkerFeatures(desc.Node):
value = "",
uid = [0]
),
desc.BoolParam(
name = "hack",
label = "Enable to Bypass 128-Tag Limit",
description = "This option skips the FeatureMatching node and directly generates a matches.txt file for cctag match.",
value = False,
uid = [0]
),
desc.ChoiceParam(
name = "delimiter",
label = "Delimiter",
description = "Delimiter character used in the input CSV file.",
value = "space",
value = "comma",
values = ["space", "tab", "comma", "colon", "semicolon"],
exclusive = True,
uid = [0]
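A note on the bypass option added in this hunk: the 128-tag ceiling appears to come from the descriptor layout written in write_describers below, where each marker is stored as a 128-byte one-hot array (a bytearray(128) with 255 at the tag's index), so tag IDs above 127 cannot be encoded that way. A minimal sketch, assuming that layout:

```python
# Minimal sketch, assuming the one-hot 128-byte descriptor layout used in
# write_describers below. Only tag IDs 0..127 fit in such a descriptor.
data = bytearray(128)
data[42] = 255        # fine for tag IDs 0..127
# data[200] = 255     # would raise IndexError: bytearray index out of range
```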
@@ -86,6 +94,13 @@ class ImportMarkerFeatures(desc.Node):
description = "Output path for the features and descriptors files (*.feat, *.desc).",
value = desc.Node.internalFolder,
uid = []
),
desc.File(
name = "matches_out",
label = "Matches Folder",
description = "Link to the SFM node's Matches Folder input, witch supports multiple elements.",
value = desc.Node.internalFolder,
uid = []
)
]

@@ -95,7 +110,7 @@ def load_images(self, chunk, filepath, delimiter):

with open(filepath) as file:
gcp_file = csv.reader(file, delimiter=delimiter)
csv_data = [(row[2], float(row[0]), float(row[1]), float(row[4]), int(row[3])) for row in gcp_file]
csv_data = [(row[2], row[0], row[1], row[4], int(row[3])) for row in gcp_file]

images.update({item[0]: [] for item in csv_data})
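For reference, a hedged reading of the column order the comprehension above expects — x, y, image name, tag id, size — with illustrative values only:

```python
# Illustrative only: one comma-delimited row as load_images reads it.
# Assumed column order: x, y, image name, tag id, size.
row = "1024.5,768.2,IMG_0001.JPG,17,32.0".split(",")
record = (row[2], row[0], row[1], row[4], int(row[3]))
# -> ('IMG_0001.JPG', '1024.5', '768.2', '32.0', 17)
```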

@@ -127,24 +142,32 @@ def write_describers(self, chunk, images, lookup):
chunk.logManager.makeProgressBar(len(lookup))

found_markers = {i: 0 for i in list(set([marker[3] for img in images for marker in images[img]]))}
feature_lookup = {viewid: {} for viewid in lookup.values()} # feature_lookup[viewid][tagid]

for i, img in enumerate(lookup):
viewid = lookup[img]

feat = open(os.path.join(chunk.node.output.value, viewid + (".%s.feat" % chunk.node.type.value)), "w")
desc = open(os.path.join(chunk.node.output.value, viewid + (".%s.desc" % chunk.node.type.value)), "wb")

if img in images:
feat_idx = 0
markers = images[img]

desc.write(struct.pack('<Q', len(markers)))

for marker in markers:
found_markers[marker[3]] += 1
feat.write("%.2f %.2f %.4f 0\n" % (marker[0], marker[1], marker[2]))
feat_x, feat_y, feat_size, feat_orientation = marker[0], marker[1], marker[2], "0"
tagid = marker[3]
found_markers[tagid] += 1
feat.write(" ".join((feat_x, feat_y, feat_size, feat_orientation + "\n")))
feature_lookup[viewid][tagid] = str(feat_idx)
feat_idx += 1

if chunk.node.hack.value is False:
data = bytearray(128)
data[marker[3]] = 255
desc.write(data)

else:
desc.write(struct.pack('<Q', 0))
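A hedged sketch of reading back the *.desc layout this loop writes when the bypass is disabled: a little-endian uint64 marker count followed by one 128-byte one-hot block per marker. The filename and describer type below are illustrative:

```python
import struct

# Hedged sketch: parse one view's descriptor file as written above (bypass off).
# "1234567890.cctag4.desc" is an illustrative viewid/describer-type filename.
with open("1234567890.cctag4.desc", "rb") as f:
    (count,) = struct.unpack('<Q', f.read(8))  # little-endian uint64 marker count
    for _ in range(count):
        block = f.read(128)                    # one 128-byte one-hot descriptor
        print(block.index(255))                # recover the tag id
```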

@@ -157,6 +180,26 @@ def write_describers(self, chunk, images, lookup):
for marker in found_markers:
chunk.logger.info("\tFound marker %d in %d view(s)" % (marker, found_markers[marker]))

return feature_lookup

def make_matches_txt(self, chunk, image_pairs, feature_lookup):
out_temp = []
for pair in image_pairs:
viewid_A, viewid_B = pair
tags_detected_A = feature_lookup[viewid_A].keys()
tags_detected_B = feature_lookup[viewid_B].keys()
match = tags_detected_A & tags_detected_B
if match:
out_temp.append(" ".join((viewid_A, viewid_B)))
out_temp.append("1")
out_temp.append(f"{chunk.node.type.value} {len(match)}")
for tagid in match:
feature_index_A = feature_lookup[viewid_A][tagid]
feature_index_B = feature_lookup[viewid_B][tagid]
out_temp.append(" ".join((feature_index_A, feature_index_B)))

with open(os.path.join(chunk.node.matches_out.value, "0.matches.txt"), "w") as matches_txt:
matches_txt.write("\n".join(out_temp))

def processChunk(self, chunk):
delimiters_options = {
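Based purely on the writer above, a generated 0.matches.txt contains, for every view pair sharing at least one tag: the view-id pair, a line "1", a line "<describerType> <matchCount>", and one "featureIndexA featureIndexB" line per shared tag. An illustrative block (view IDs, describer type, and indices are made up):

```
1000000001 1000000002
1
cctag4 2
0 0
1 2
```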
@@ -176,8 +219,11 @@ def processChunk(self, chunk):
raise OSError("Marker features list file not found")

lookup = self.load_viewids(chunk)
image_pairs = list(itertools.combinations(lookup.values(), 2)) # Produces the same pairs as the ImageMatching node's "Exhaustive" method.
images = self.load_images(chunk, chunk.node.matches.value, delimiters_options[chunk.node.delimiter.value])
self.write_describers(chunk, images, lookup)
feature_lookup = self.write_describers(chunk, images, lookup)
if chunk.node.hack.value:
self.make_matches_txt(chunk, image_pairs, feature_lookup)

chunk.logger.info("Task done")

@@ -186,4 +232,4 @@ def processChunk(self, chunk):
chunk.logger.error(e)
raise
finally:
chunk.logManager.end()
chunk.logManager.end()
4 changes: 3 additions & 1 deletion README.md
@@ -78,6 +78,8 @@ The built-in marker detection support in Meshroom is rather obviously an afterth

The marker feature describer files generated from the supplied data will be placed in this folder. This folder can then be added to the Features Folders input of the FeatureMatching node. (The desired marker family feature type - cctag3 or cctag4 - has to be manually enabled in the FeatureMatching node!!!)

NOTE: A known issue on Windows: when the SFM node uses the CCTag describer and the SFM node's input paths (such as SfMData and Features Folders) are entered manually, verify the drive letter's case. Otherwise a fatal error ("Error while loading markers regions") occurs at the end of the SFM process. This issue does not occur with other describer types.

## SfMTransformFromMarkers node

![](images/SfMTransformFromMarkers_node.png)
@@ -167,4 +169,4 @@ The SfMTransform node included in Meshroom can be used to georeference the spars

2. Poses

File containing view descriptions, adjusted intrinsic parameters, and georeferenced extrinsic parameters.
File containing view descriptions, adjusted intrinsic parameters, and georeferenced extrinsic parameters.