Description
Is there an existing issue for this problem?
- I have searched the existing issues
Operating system
Linux
GPU vendor
AMD (ROCm)
GPU model
RX 6700 XT
GPU VRAM
12GB
Version number
5.10.1
Browser
Firefox 137.0.2
Python dependencies
{
"version": "5.10.1",
"dependencies": {
"accelerate" : "1.6.0" ,
"compel" : "2.0.2" ,
"cuda" : null ,
"diffusers" : "0.33.0" ,
"numpy" : "1.26.3" ,
"opencv" : "4.9.0.80" ,
"onnx" : "1.16.1" ,
"pillow" : "11.0.0" ,
"python" : "3.12.10" ,
"torch" : "2.6.0+rocm6.2.4" ,
"torchvision" : "0.21.0+rocm6.2.4",
"transformers": "4.51.3" ,
"xformers" : null
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "127.0.0.1",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": [""],
"allow_headers": [""],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": false,
"patchmatch": true,
"models_dir": "models",
"convert_cache_dir": "models/.convert_cache",
"download_cache_dir": "models/.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "/home/andre/Documents/Tmp/invokeai2/outputs",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": false,
"device_working_mem_gb": 8,
"enable_partial_loading": false,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": null,
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "sliced",
"attention_slice_size": "balanced",
"force_tiled_decode": false,
"pil_compress_level": 1,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "blake3_single",
"remote_api_tokens": null,
"scan_models_on_startup": false
},
"set_config_fields": [
"attention_type" , "device_working_mem_gb" , "attention_slice_size" , "outputs_dir" ,
"legacy_models_yaml_path"
]
}
What happened
Upon clicking Invoke, seemingly at random, the app spits out the following error instead of beginning the generation process:
[2025-04-22 09:35:41,174]::[InvokeAI]::ERROR --> Non-fatal error in session processor ValidationError: 1 validation error for GraphExecutionState
JSON input should be string, bytes or bytearray [type=json_type, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/json_type
[2025-04-22 09:35:41,175]::[InvokeAI]::ERROR --> Traceback (most recent call last):
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_processor/session_processor_default.py", line 434, in _process
self._queue_item = self._invoker.services.session_queue.dequeue()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_sqlite.py", line 171, in dequeue
queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="in_progress")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_sqlite.py", line 237, in _set_queue_item_status
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_sqlite.py", line 613, in get_queue_status
current_item = self.get_current(queue_id=queue_id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_sqlite.py", line 211, in get_current
return SessionQueueItem.queue_item_from_dict(dict(result))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_common.py", line 298, in queue_item_from_dict
queue_item_dict["session"] = get_session(queue_item_dict)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/invokeai/app/services/session_queue/session_queue_common.py", line 192, in get_session
session = GraphExecutionStateValidator.validate_json(session_raw, strict=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.12/site-packages/pydantic/type_adapter.py", line 468, in validate_json
return self.validator.validate_json(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
pydantic_core._pydantic_core.ValidationError: 1 validation error for GraphExecutionState
JSON input should be string, bytes or bytearray [type=json_type, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/json_type
What you expected to happen
Normal image generation.
How to reproduce the problem
Reproduction is unclear. It seems to happen at random, and infrequently. I can press Invoke again immediately after the error, and it works as usual.
Additional context
I was testing with NoobAI-XL 1.1 Eps plus a LoRA. It is unclear whether this issue is model-specific.
Discord username
cubethethird