26 commits
29ea17c
[core] Update submitter API
Alxiice Oct 13, 2025
ad1e1a3
[core] node/taskManager: create _chunksCreated to delay chunk creatio…
Alxiice Oct 13, 2025
83cdcf7
[node] Add licenses list on node desc to provide a source where we ca…
Alxiice Oct 13, 2025
a75e715
[core] computation : update computation levels
Alxiice Oct 13, 2025
0059a4d
[bin] Add createChunks script
Alxiice Oct 13, 2025
8f6440a
[submitter] Fix SubmitterOptionsEnum.ALL mode on py 3.9
Alxiice Oct 13, 2025
8bc614c
[qml] Fix anchor issue when chunks are emptied
Alxiice Oct 13, 2025
771f1fc
[core] Node : add defaultStatus in _createChunks
Alxiice Oct 17, 2025
5819d55
[core] Start updating taskmanager and submitter for new chunk process
Alxiice Oct 21, 2025
acba5cd
[core] First implementation to kill submitted tasks
Alxiice Oct 21, 2025
c68dd6a
[core] graph : Manage nodeStatus file monitoring
Alxiice Oct 21, 2025
c736f79
[code] submitter : fix issues in dynamic chunks & submitting
Alxiice Oct 24, 2025
f113e9a
[code] graph : Better management of statuses after task/job actions
Alxiice Oct 24, 2025
e4f1b2a
[core] Fix issues on missing chunks fro sfm node & node chunk indicator
Alxiice Oct 27, 2025
ecbcbed
[core] submitter : retrieve job on node update + fix some ui issues
Alxiice Oct 28, 2025
78f6b97
[ui] NodeActions : keep it on the screen if the node cross the top bound
Alxiice Nov 6, 2025
a2d0357
[chunks] Apply typos/cleaning suggestions from @cbentejac
Alxiice Nov 6, 2025
1a9647a
[submitter] Add tools to avoid autoretry on farm
Alxiice Nov 6, 2025
efb53a6
[submitter] Fix interruptJob UI updates
Alxiice Nov 6, 2025
dd45238
[ui] NodeActions : Fix ugly color on submitted nodes
Alxiice Nov 6, 2025
c0c14ee
[submitters] Remove submitters from meshroom to develop them in mrSub…
Alxiice Nov 6, 2025
b1b2d73
[core] local farm : Start creating local farm & submitter
Alxiice Nov 6, 2025
2587aea
Complete localFarm implementation on tests
Alxiice Nov 6, 2025
52457c8
[tests] Only test submitter on linux because localfarm uses unix sockets
Alxiice Nov 6, 2025
dc1b06b
[bin] add executable permission in createChunks
Alxiice Nov 6, 2025
dbc7fd5
Change log on local farm backend
Alxiice Nov 6, 2025
1 change: 1 addition & 0 deletions .codecov.yml
@@ -50,3 +50,4 @@ ignore:
- "docs/"
- "scripts/"
- "bin/"
- "localFarm/" # For now ignore localFarm as it has no coverage yet
42 changes: 38 additions & 4 deletions bin/meshroom_compute
@@ -3,6 +3,7 @@ import argparse
import logging
import os
import sys
from typing import NoReturn

try:
    import meshroom
@@ -16,7 +17,7 @@ meshroom.setupEnvironment()

import meshroom.core
import meshroom.core.graph
from meshroom.core.node import Status, ExecMode
from meshroom.core.node import Status


parser = argparse.ArgumentParser(description='Execute a Graph of processes.')
@@ -63,20 +64,46 @@ else:

meshroom.core.initPlugins()
meshroom.core.initNodes()
meshroom.core.initSubmitters()

graph = meshroom.core.graph.loadGraph(args.graphFile)
if args.cache:
    graph.cacheDir = args.cache
graph.update()


def killRunningJob(node) -> NoReturn:
    """ Kill the current job and try to prevent it from being restarted. """
    jobInfos = node.nodeStatus.jobInfos
    submitterName = jobInfos.get("submitterName")
    if not submitterName:
        sys.exit(meshroom.MeshroomExitStatus.ERROR_NO_RETRY)
    from meshroom.core import submitters
    for subName, sub in submitters.items():
        if submitterName == subName:
            sub.killRunningJob()
            break
    sys.exit(meshroom.MeshroomExitStatus.ERROR_NO_RETRY)


if args.node:
    # Execute the node
    node = graph.findNode(args.node)
    node.updateStatusFromCache()
    submittedStatuses = [Status.RUNNING]
    if not args.extern:
        # If running as "extern", the task is supposed to have the status SUBMITTED.
        # If not running as "extern", the SUBMITTED status should generate a warning.
        submittedStatuses.append(Status.SUBMITTED)

    if not node._chunksCreated:
        print(f"Error: Node {node} has been submitted before chunks have been created. " \
              f"See file: \"{node.nodeStatusFile}\".")
        sys.exit(-1)

    if node._isInputNode():
        print(f"InputNode: No computation to do.")

    if not args.forceStatus and not args.forceCompute:
        if args.iteration != -1:
            chunks = [node.chunks[args.iteration]]
@@ -85,10 +112,11 @@ if args.node:
        for chunk in chunks:
            if chunk.status.status in submittedStatuses:
                # Particular case for the local isolated execution: the node status is set to RUNNING by the submitter directly.
                # We ensure that no other instance has started to compute, by checking that the sessionUid is empty.
                if chunk.node.getMrNodeType() == meshroom.core.MrNodeType.NODE and not chunk.status.sessionUid and chunk.status.submitterSessionUid:
                # We ensure that no other instance has started to compute, by checking that the computeSessionUid is empty.
                if chunk.node.getMrNodeType() == meshroom.core.MrNodeType.NODE and \
                        not chunk.status.computeSessionUid and node._nodeStatus.submitterSessionUid:
                    continue
                print(f'Warning: Node is already submitted with status "{chunk.status.status.name}". See file: "{chunk.statusFile}". ExecMode: {chunk.status.execMode.name}, SessionUid: {chunk.status.sessionUid}, submitterSessionUid: {chunk.status.submitterSessionUid}')
                print(f'Warning: Node is already submitted with status "{chunk.status.status.name}". See file: "{chunk.statusFile}". ExecMode: {chunk.status.execMode.name}, computeSessionUid: {chunk.status.computeSessionUid}, submitterSessionUid: {node._nodeStatus.submitterSessionUid}')
                # sys.exit(-1)

    if args.extern:
@@ -99,8 +127,14 @@ if args.node:
    node.preprocess()
    if args.iteration != -1:
        chunk = node.chunks[args.iteration]
        if chunk._status.status == Status.STOPPED:
            print(f"Chunk {chunk}: status is STOPPED")
            killRunningJob(node)
        chunk.process(args.forceCompute, args.inCurrentEnv)
    else:
        if node.nodeStatus.status == Status.STOPPED:
            print(f"Node {node}: status is STOPPED")
            killRunningJob(node)
        node.process(args.forceCompute, args.inCurrentEnv)
    node.postprocess()
    node.restoreLogger()
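For context on the new exit path: killRunningJob exits with meshroom.MeshroomExitStatus.ERROR_NO_RETRY so that a farm-side wrapper can tell "failed, do not retry" apart from an ordinary failure. Below is a minimal, hypothetical sketch of such a wrapper; runOnce is an illustrative helper, and it assumes ERROR_NO_RETRY is an integer small enough to survive as a process return code, which this diff does not show.

    # Hypothetical farm-side sketch (not part of this PR): run meshroom_compute
    # and decide whether the task may be retried, based on the no-retry exit code.
    import subprocess

    import meshroom


    def runOnce(cmd):
        """Run a meshroom_compute command; return (succeeded, mayRetry)."""
        ret = subprocess.call(cmd)
        if ret == meshroom.MeshroomExitStatus.ERROR_NO_RETRY:
            # The process asked not to be respawned (node was stopped or killed).
            return False, False
        return ret == 0, ret != 0

The diff only defines the exit path; how a given farm consumes this code is submitter-specific.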
145 changes: 145 additions & 0 deletions bin/meshroom_createChunks
@@ -0,0 +1,145 @@
#!/usr/bin/env python

"""
This script wraps the processing of a node on the farm.
It handles the chunk creation and creates the jobs for these chunks.
If the submitter cannot create chunks, the chunks are processed serially
in the current process.
"""

import argparse
import logging
import os
import sys
try:
    import meshroom
except Exception:
    # If meshroom module is not in the PYTHONPATH, add our root using the relative path
    import pathlib
    meshroomRootFolder = pathlib.Path(__file__).parent.parent.resolve()
    sys.path.append(meshroomRootFolder)
    import meshroom
meshroom.setupEnvironment()

import meshroom.core
import meshroom.core.graph
from meshroom.core import submitters
from meshroom.core.submitter import SubmitterOptionsEnum
from meshroom.core.node import Status


parser = argparse.ArgumentParser(description='Execute a Graph of processes.')
parser.add_argument('graphFile', metavar='GRAPHFILE.mg', type=str,
                    help='Filepath to a graph file.')

parser.add_argument('--submitter', type=str, required=True,
                    help='Name of the submitter used to create the job.')
parser.add_argument('--node', metavar='NODE_NAME', type=str, required=True,
                    help='Process the node. It will generate an error if the dependencies are not already computed.')
parser.add_argument('--inCurrentEnv', help='Execute process in current env without creating a dedicated runtime environment.',
                    action='store_true')
parser.add_argument('--forceStatus', help='Force computation if status is RUNNING or SUBMITTED.',
                    action='store_true')
parser.add_argument('--forceCompute', help='Compute in all cases even if already computed.',
                    action='store_true')
parser.add_argument('--extern', help='Use this option when you compute externally after submission to a render farm from meshroom.',
                    action='store_true')
parser.add_argument('--cache', metavar='FOLDER', type=str,
                    default=None,
                    help='Override the cache folder')
parser.add_argument('-v', '--verbose',
                    help='Set the verbosity level for logging:\n'
                         ' - fatal: Show only critical errors.\n'
                         ' - error: Show errors only.\n'
                         ' - warning: Show warnings and errors.\n'
                         ' - info: Show standard informational messages.\n'
                         ' - debug: Show detailed debug information.\n'
                         ' - trace: Show all messages, including trace-level details.',
                    default=os.environ.get('MESHROOM_VERBOSE', 'info'),
                    choices=['fatal', 'error', 'warning', 'info', 'debug', 'trace'])

args = parser.parse_args()

# For extern computation, we want to focus on the node computation log.
# So, we avoid polluting the log with general warning about plugins, versions of nodes in file, etc.
logging.getLogger().setLevel(level=logging.INFO)

meshroom.core.initPlugins()
meshroom.core.initNodes()
meshroom.core.initSubmitters() # Required to spool child job

graph = meshroom.core.graph.loadGraph(args.graphFile)
if args.cache:
    graph.cacheDir = args.cache
graph.update()

# Execute the node
node = graph.findNode(args.node)
submittedStatuses = [Status.RUNNING]

# Find submitter
submitter = None
# It's required if we want to spool chunks on different machines
for subName, sub in submitters.items():
    if args.submitter == subName:
        submitter = sub
        break

if node._nodeStatus.status in (Status.STOPPED, Status.KILLED):
    logging.error("Node status is STOPPED or KILLED.")
    if submitter:
        submitter.killRunningJob()
    sys.exit(meshroom.MeshroomExitStatus.ERROR_NO_RETRY)

if not node._chunksCreated:
    # Create node chunks
    # Once created we don't have to do it again even if we relaunch the job
    node.createChunks()
    # Set the chunks statuses
    for chunk in node._chunks:
        if args.forceCompute or chunk._status.status != Status.SUCCESS:
            hasChunkToLaunch = True
            chunk._status.setNode(node)
            chunk._status.initExternSubmit()
            chunk.upgradeStatusFile()

# Get chunks to process in the current process
chunksToProcess = []
if submitter:
    if not submitter._options.includes(SubmitterOptionsEnum.EDIT_TASKS):
        chunksToProcess = node.chunks
else:
    # Cannot retrieve job -> execute process serially
    chunksToProcess = node.chunks

logging.info(f"[MeshroomCreateChunks] Chunks to process here : {chunksToProcess}")

if not args.forceStatus and not args.forceCompute:
    for chunk in chunksToProcess:
        if chunk.status.status in submittedStatuses:
            # Particular case for the local isolated execution: the node status is set to RUNNING by the submitter directly.
            # We ensure that no other instance has started to compute, by checking that the computeSessionUid is empty.
            if chunk.node.getMrNodeType() == meshroom.core.MrNodeType.NODE and \
                    not chunk.status.computeSessionUid and node._nodeStatus.submitterSessionUid:
                continue
            logging.warning(
                f"[MeshroomCreateChunks] Node is already submitted with status " \
                f"\"{chunk.status.status.name}\". See file: \"{chunk.statusFile}\". " \
                f"ExecMode: {chunk.status.execMode.name}, computeSessionUid: {chunk.status.computeSessionUid}, " \
                f"submitterSessionUid: {node._nodeStatus.submitterSessionUid}")

if chunksToProcess:
    node.prepareLogger()
    node.preprocess()
    for chunk in chunksToProcess:
        logging.info(f"[MeshroomCreateChunks] process chunk {chunk}")
        chunk.process(args.forceCompute, args.inCurrentEnv)
    node.postprocess()
    node.restoreLogger()
else:
    logging.info(f"[MeshroomCreateChunks] -> create job to process chunks {[c for c in node.chunks]}")
    submitter.createChunkTask(node, graphFile=args.graphFile, cache=args.cache,
                              forceStatus=args.forceStatus, forceCompute=args.forceCompute)

# Restore the log level
logging.getLogger().setLevel(meshroom.logStringToPython[args.verbose])
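As a usage illustration (not part of the diff): a submitter job would typically spool this script once per node, passing its own name back so the script can find it in submitters. A hedged sketch of the command construction follows; buildCreateChunksCommand and the submitter name "myFarm" are hypothetical, only the command-line arguments come from the parser in this file.

    # Hypothetical sketch (not part of this PR): the kind of command line a
    # submitter could spool as the first task of a node's job.
    # Assumes "meshroom_createChunks" is on PATH; "myFarm" is a placeholder name.
    def buildCreateChunksCommand(nodeName, graphFile, cache=None,
                                 forceStatus=False, forceCompute=False):
        cmd = ["meshroom_createChunks", graphFile,
               "--submitter", "myFarm",
               "--node", nodeName]
        if cache:
            cmd += ["--cache", cache]
        if forceStatus:
            cmd.append("--forceStatus")
        if forceCompute:
            cmd.append("--forceCompute")
        return cmd

The resulting task would then either process the chunks itself or, if the submitter reports the EDIT_TASKS option, spool one task per chunk via createChunkTask, as implemented above.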
Empty file added localfarm/__init__.py