|
#!/usr/bin/env python
import argparse
import logging
import os
import sys

try:
    import meshroom
except ImportError:
    # If the meshroom module is not in the PYTHONPATH, add the repository root
    # (the parent of this script's folder) to sys.path and retry the import.
    import pathlib
    meshroomRootFolder = pathlib.Path(__file__).parent.parent.resolve()
    # sys.path entries are expected to be strings, not pathlib.Path objects,
    # so convert explicitly before appending.
    sys.path.append(str(meshroomRootFolder))
    import meshroom
meshroom.setupEnvironment()
| 16 | + |
| 17 | +import meshroom.core |
| 18 | +import meshroom.core.graph |
| 19 | +from meshroom.core.submitter import jobManager |
| 20 | +from meshroom.core import submitters |
| 21 | +from meshroom.core.submitter import SubmitterOptionsEnum |
| 22 | +from meshroom.core.node import Status |
| 23 | + |
| 24 | + |
# Command-line interface.
# RawTextHelpFormatter preserves the explicit newlines used in the --verbose
# help text below; the default formatter would collapse them into one wrapped
# paragraph.
parser = argparse.ArgumentParser(description='Execute a Graph of processes.',
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
                    help='Filepath to a graph file.')
parser.add_argument('--node', metavar='NODE_NAME', type=str, required=True,
                    help='Process the node. It will generate an error if the dependencies are not already computed.')
parser.add_argument('--inCurrentEnv', help='Execute process in current env without creating a dedicated runtime environment.',
                    action='store_true')
parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
                    action='store_true')
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
                    action='store_true')
parser.add_argument('--extern', help='Use this option when you compute externally after submission to a render farm from meshroom.',
                    action='store_true')
parser.add_argument('--cache', metavar='FOLDER', type=str,
                    default=None,
                    help='Override the cache folder')
parser.add_argument('-v', '--verbose',
                    help='Set the verbosity level for logging:\n'
                         ' - fatal: Show only critical errors.\n'
                         ' - error: Show errors only.\n'
                         ' - warning: Show warnings and errors.\n'
                         ' - info: Show standard informational messages.\n'
                         ' - debug: Show detailed debug information.\n'
                         ' - trace: Show all messages, including trace-level details.',
                    default=os.environ.get('MESHROOM_VERBOSE', 'info'),
                    choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'])

args = parser.parse_args()
| 53 | + |
# For extern computation, we want to focus on the node computation log.
# So, we avoid polluting the log with general warning about plugins, versions of nodes in file, etc.
# The user-requested verbosity (args.verbose) is restored at the end of the script.
logging.getLogger().setLevel(level=logging.INFO)

# Initialize the meshroom runtime: plugin discovery, node type registration,
# and submitter registration (the latter is needed to spool child jobs below).
meshroom.core.initPlugins()
meshroom.core.initNodes()
meshroom.core.initSubmitters()  # Required to spool child job

# Load the graph file given on the command line, optionally overriding its
# cache folder, then recompute the graph's internal state.
graph = meshroom.core.graph.loadGraph(args.graphFile)
if args.cache:
    graph.cacheDir = args.cache
graph.update()
| 66 | + |
# Locate the node to execute; statuses considered "already submitted/running".
node = graph.findNode(args.node)
submittedStatuses = [Status.RUNNING]


# Find the render-farm job this node was submitted to, if any.
# It's required if we want to spool chunks on different machines.
submitter, job = None, None
jobInfos = node._nodestatus.jobInfos
submitterName, jid = jobInfos.get("submitterName"), jobInfos.get("jid")
for subName, sub in submitters.items():
    # Only submitters that support job retrieval are candidates.
    if not sub._options.includes(SubmitterOptionsEnum.RETRIEVE):
        continue
    if submitterName == subName:
        submitter = sub
        # NOTE(review): "retreiveJob" is the jobManager API spelling (sic);
        # kept as-is since it is defined outside this file.
        job = jobManager.retreiveJob(sub, jid)
        break

if not getattr(node, '_chunksCreated', False):
    # Create node chunks.
    # Once created we don't have to do it again even if we relaunch the job.
    node._createChunks()

# Select the chunks to process in the current process:
#  - no retrievable job -> execute all chunks serially here;
#  - job without per-task editing -> also execute all chunks here;
#  - job with EDIT_TASKS support -> leave empty; chunk tasks are spooled later.
chunksToProcess = []
if job:
    if not job.submitterOptions.includes(SubmitterOptionsEnum.EDIT_TASKS):
        chunksToProcess = node.chunks
else:
    # Cannot retrieve job -> execute process serially
    chunksToProcess = node.chunks
| 98 | + |
# Bug fix: logging.info() is not print() — extra positional arguments are
# %-format parameters, and the original message had no placeholder, which made
# the logging module raise a formatting error instead of printing the list.
# Lazy %-args also avoid building the string when INFO is disabled.
logging.info("[MeshroomCreateChunks] Chunks to process here : %s", chunksToProcess)

# TODO(review): identify whether the current process is a running chunk or
# whether the chunk job still needs to be spooled:
#  - create a map from job/nodes
#  - if we are on a task process, proceed as before
| 109 | + |
# Unless forced, warn about chunks that appear to be already submitted/running.
if not args.forceStatus and not args.forceCompute:
    for chunk in chunksToProcess:
        if chunk.status.status in submittedStatuses:
            # Particular case for the local isolated, the node status is set to RUNNING by the submitter directly.
            # We ensure that no other instance has started to compute, by checking that the sessionUid is empty.
            if chunk.node.getMrNodeType() == meshroom.core.MrNodeType.NODE and not chunk.status.sessionUid and chunk.status.submitterSessionUid:
                continue
            # Adjacent f-strings inside the call parentheses concatenate
            # implicitly; the original trailing backslashes were redundant.
            logging.warning(
                f"[MeshroomCreateChunks] Node is already submitted with status "
                f"\"{chunk.status.status.name}\". See file: \"{chunk.statusFile}\". "
                f"ExecMode: {chunk.status.execMode.name}, SessionUid: {chunk.status.sessionUid}, "
                f"submitterSessionUid: {chunk.status.submitterSessionUid}")
            # NOTE(review): aborting on an already-submitted chunk is disabled;
            # confirm whether this should exit or merely warn.
            # sys.exit(-1)
| 123 | + |
if chunksToProcess:
    # Compute the selected chunks in this process.
    node.prepareLogger()
    node.preprocess()
    for chunk in chunksToProcess:
        # Lazy %-args: the logging module formats only if the record is emitted.
        logging.info("[MeshroomCreateChunks] process chunk %s", chunk)
        chunk.process(args.forceCompute, args.inCurrentEnv)
    node.postprocess()
    node.restoreLogger()
else:
    # Spool one task per chunk on the retrieved job.
    # NOTE(review): this branch assumes `job` is not None — chunksToProcess is
    # only empty when a job supporting EDIT_TASKS was retrieved (or node.chunks
    # is empty); confirm `job` cannot be None here.
    logging.info("[MeshroomCreateChunks] -> create job to process chunks %s", node.chunks)
    job.addChunkTask(node, graphFile=args.graphFile, cache=args.cache,
                     forceStatus=args.forceStatus, forceCompute=args.forceCompute)

# Restore the log level requested on the command line.
logging.getLogger().setLevel(meshroom.logStringToPython[args.verbose])
0 commit comments