-
-
Notifications
You must be signed in to change notification settings - Fork 1.2k
Expand file tree
/
Copy pathmeshroom_createChunks
More file actions
executable file
·151 lines (132 loc) · 6.47 KB
/
meshroom_createChunks
File metadata and controls
executable file
·151 lines (132 loc) · 6.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
#!/usr/bin/env python
"""
This is a script used to wrap the process of processing a node on the farm
It will handle chunk creation and create all the jobs for these chunks
If the submitter cannot create chunks, then it will process the chunks serially
in the current process
"""
import argparse
import logging
import os
import sys

try:
    import meshroom
except Exception:
    # If meshroom module is not in the PYTHONPATH, add our root using the relative path
    import pathlib
    meshroomRootFolder = pathlib.Path(__file__).parent.parent.resolve()
    # sys.path entries are documented as str; convert the Path explicitly
    # instead of relying on unspecified Path-in-sys.path behavior.
    sys.path.append(str(meshroomRootFolder))
    import meshroom

# NOTE: keep this call before the meshroom.core imports below -- the original
# ordering suggests those modules depend on the environment it sets up.
meshroom.setupEnvironment()

import meshroom.core
import meshroom.core.graph
from meshroom.core import submitters
from meshroom.core.submitter import SubmitterOptionsEnum
from meshroom.core.node import Status
# Command-line interface.
# NOTE: the declaration order of the arguments is the order shown by --help;
# keep it stable.
parser = argparse.ArgumentParser(description='Execute a Graph of processes.')
parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
                    help='Filepath to a graph file.')
parser.add_argument('--submitter', type=str, required=True,
                    help='Name of the submitter used to create the job.')
parser.add_argument('--node', metavar='NODE_NAME', type=str, required=True,
                    help='Process the node. It will generate an error if the dependencies are not already computed.')
parser.add_argument('--inCurrentEnv', help='Execute process in current env without creating a dedicated runtime environment.',
                    action='store_true')
parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
                    action='store_true')
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
                    action='store_true')
parser.add_argument('--extern', help='Use this option when you compute externally after submission to a render farm from meshroom.',
                    action='store_true')
parser.add_argument('--cache', metavar='FOLDER', type=str,
                    default=None,
                    help='Override the cache folder')
# Verbosity defaults to the MESHROOM_VERBOSE environment variable when set.
parser.add_argument('-v', '--verbose',
                    help='Set the verbosity level for logging:\n'
                         ' - fatal: Show only critical errors.\n'
                         ' - error: Show errors only.\n'
                         ' - warning: Show warnings and errors.\n'
                         ' - info: Show standard informational messages.\n'
                         ' - debug: Show detailed debug information.\n'
                         ' - trace: Show all messages, including trace-level details.',
                    default=os.environ.get('MESHROOM_VERBOSE', 'info'),
                    choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'])
args = parser.parse_args()
# For extern computation, we want to focus on the node computation log.
# So, we avoid polluting the log with general warning about plugins, versions of nodes in file, etc.
# The level requested via --verbose is restored at the end of the script.
logging.getLogger().setLevel(level=logging.INFO)

# Register plugins, node types and submitters before touching the graph.
meshroom.core.initPlugins()
meshroom.core.initNodes()
meshroom.core.initSubmitters()  # Required to spool child job

# Load the graph, optionally redirect its cache folder, and refresh its state.
graph = meshroom.core.graph.loadGraph(args.graphFile)
if args.cache:
    graph.cacheDir = args.cache
graph.update()

# Execute the node
node = graph.findNode(args.node)
# Statuses meaning the chunk has already been spooled/started elsewhere.
submittedStatuses = [Status.RUNNING]

# Find submitter.
# It's required if we want to spool chunks on different machines.
# 'submitters' maps submitter names to submitter instances (it was iterated
# with .items() before), so a direct mapping lookup replaces the manual
# search loop; None when the requested name is unknown.
submitter = submitters.get(args.submitter)

# A STOPPED/KILLED node must not be (re)computed: kill any running farm job
# and exit with a no-retry status so the farm does not requeue this task.
if node._nodeStatus.status in (Status.STOPPED, Status.KILLED):
    logging.error("Node status is STOPPED or KILLED.")
    if submitter:
        submitter.killRunningJob()
    sys.exit(meshroom.MeshroomExitStatus.ERROR_NO_RETRY)
if not node._chunksCreated:
    # Create node chunks.
    # Once created we don't have to do it again even if we relaunch the job.
    node.createChunks()
    if not node._chunksCreated:
        # NOTE(review): we only log and keep going here; if the rest of the
        # script cannot work without chunks, a sys.exit is probably missing
        # -- confirm the intended behavior.
        logging.error(f"Failed to create chunks for node {node.name}.")

# Set the chunks statuses: every chunk that still needs computing (or all of
# them with --forceCompute) is marked as externally submitted and its status
# file is written out.
for chunk in node._chunks:
    if args.forceCompute or chunk._status.status != Status.SUCCESS:
        chunk._status.setNode(node)
        chunk._status.initExternSubmit()
        chunk.upgradeStatusFile()
    else:
        # Chunk already SUCCESS and no recompute requested:
        # leave its status file untouched.
        logging.warning(f"Chunk {chunk} status is {chunk._status.status}.")
        logging.warning("The status file has not been updated for this chunk.")
# Get chunks to process in the current process.
# Chunks are executed serially here only when no submitter can edit tasks
# (EDIT_TASKS); otherwise they are spooled as separate farm tasks below.
chunksToProcess = []
if submitter:
    if not submitter._options.includes(SubmitterOptionsEnum.EDIT_TASKS):
        chunksToProcess = node.chunks
else:
    # Cannot retrieve job -> execute process serially
    chunksToProcess = node.chunks
logging.info(f"[MeshroomCreateChunks] Chunks to process here : {chunksToProcess}")

if not args.forceStatus and not args.forceCompute:
    for chunk in chunksToProcess:
        if chunk.status.status in submittedStatuses:
            # Particular case for the local isolated, the node status is set to RUNNING by the submitter directly.
            # We ensure that no other instance has started to compute, by checking that the computeSessionUid is empty.
            if chunk.node.getMrNodeType() == meshroom.core.MrNodeType.NODE and \
                    not chunk.status.computeSessionUid and node._nodeStatus.submitterSessionUid:
                continue
            # NOTE(review): this only warns and the chunk is still processed
            # below; if concurrent computation must be prevented, a skip or
            # exit seems to be missing here -- confirm.
            logging.warning(
                f"[MeshroomCreateChunks] Node is already submitted with status " \
                f"\"{chunk.status.status.name}\". See file: \"{chunk.statusFile}\". " \
                f"ExecMode: {chunk.status.execMode.name}, computeSessionUid: {chunk.status.computeSessionUid}, " \
                f"submitterSessionUid: {node._nodeStatus.submitterSessionUid}")

if chunksToProcess:
    # Serial execution in the current process, with the node's own logger
    # active for the duration of the chunk processing.
    node.preprocess()
    node.prepareLogger()
    for chunk in chunksToProcess:
        logging.info(f"[MeshroomCreateChunks] process chunk {chunk}")
        chunk.process(args.forceCompute, args.inCurrentEnv)
    node.restoreLogger()
    node.postprocess()
else:
    # No chunk to run locally: delegate one task per chunk to the submitter.
    # NOTE(review): this branch assumes 'submitter' is set; a submitter-less
    # run with zero chunks would fail here -- presumably unreachable, confirm.
    logging.info(f"[MeshroomCreateChunks] -> create job to process chunks {list(node.chunks)}")
    submitter.createChunkTask(node, graphFile=args.graphFile, cache=args.cache,
                              forceStatus=args.forceStatus, forceCompute=args.forceCompute)

# Restore the log level requested on the command line (it was forced to INFO
# at startup to keep the node computation log readable).
logging.getLogger().setLevel(meshroom.logStringToPython[args.verbose])