Skip to content

create preliminary resource monitor manager #19465

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 4 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 48 additions & 39 deletions chia/server/start_data_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
from chia.util.chia_logging import initialize_logging
from chia.util.config import load_config, load_config_cli
from chia.util.default_root import resolve_root_path
from chia.util.resource_monitor import ResourceMonitor, ResourceMonitorConfiguration
from chia.util.task_timing import maybe_manage_task_instrumentation

# See: https://bugs.python.org/issue29288
Expand Down Expand Up @@ -92,7 +93,7 @@ def create_data_layer_service(
)


async def async_main(root_path: pathlib.Path) -> int:
async def async_main(root_path: pathlib.Path, resource_monitor: ResourceMonitor) -> int:
# TODO: refactor to avoid the double load
config = load_config(root_path, "config.yaml", fill_missing_services=True)
service_config = load_config_cli(root_path, "config.yaml", SERVICE_NAME, fill_missing_services=True)
Expand All @@ -103,49 +104,57 @@ async def async_main(root_path: pathlib.Path) -> int:
root_path=root_path,
)

create_all_ssl(
root_path=root_path,
private_node_names=["data_layer"],
public_node_names=["data_layer"],
overwrite=False,
)

plugins_config = config["data_layer"].get("plugins", {})
service_dir = root_path / SERVICE_NAME

old_uploaders = config["data_layer"].get("uploaders", [])
new_uploaders = plugins_config.get("uploaders", [])
conf_file_uploaders = await load_plugin_configurations(service_dir, "uploaders", log)
uploaders: list[PluginRemote] = [
*(PluginRemote(url=url) for url in old_uploaders),
*(PluginRemote.unmarshal(marshalled=marshalled) for marshalled in new_uploaders),
*conf_file_uploaders,
]

old_downloaders = config["data_layer"].get("downloaders", [])
new_downloaders = plugins_config.get("downloaders", [])
conf_file_uploaders = await load_plugin_configurations(service_dir, "downloaders", log)
downloaders: list[PluginRemote] = [
*(PluginRemote(url=url) for url in old_downloaders),
*(PluginRemote.unmarshal(marshalled=marshalled) for marshalled in new_downloaders),
*conf_file_uploaders,
]

service = create_data_layer_service(root_path, config, downloaders, uploaders)
async with SignalHandlers.manage() as signal_handlers:
await service.setup_process_global_state(signal_handlers=signal_handlers)
await service.run()

return 0
async with resource_monitor.manage():
create_all_ssl(
root_path=root_path,
private_node_names=["data_layer"],
public_node_names=["data_layer"],
overwrite=False,
)

plugins_config = config["data_layer"].get("plugins", {})
service_dir = root_path / SERVICE_NAME

old_uploaders = config["data_layer"].get("uploaders", [])
new_uploaders = plugins_config.get("uploaders", [])
conf_file_uploaders = await load_plugin_configurations(service_dir, "uploaders", log)
uploaders: list[PluginRemote] = [
*(PluginRemote(url=url) for url in old_uploaders),
*(PluginRemote.unmarshal(marshalled=marshalled) for marshalled in new_uploaders),
*conf_file_uploaders,
]

old_downloaders = config["data_layer"].get("downloaders", [])
new_downloaders = plugins_config.get("downloaders", [])
conf_file_uploaders = await load_plugin_configurations(service_dir, "downloaders", log)
downloaders: list[PluginRemote] = [
*(PluginRemote(url=url) for url in old_downloaders),
*(PluginRemote.unmarshal(marshalled=marshalled) for marshalled in new_downloaders),
*conf_file_uploaders,
]

service = create_data_layer_service(root_path, config, downloaders, uploaders)
async with SignalHandlers.manage() as signal_handlers:
await service.setup_process_global_state(signal_handlers=signal_handlers)
await service.run()

return 0


def main() -> int:
    """Process entry point for the data-layer service.

    Sets up the resource monitor and (optionally) task instrumentation, then
    runs :func:`async_main` and returns its exit code.
    """
    root_path = resolve_root_path(override=None)

    # TODO: just hacking for now, pr not ready until this is removed
    # NOTE(review): this fabricates the service config instead of loading it
    # from config.yaml (async_main performs the real load later), and it
    # hard-codes process_memory=True. Confirm the final version sources this
    # via load_config_cli and that the "resource_monitor" key used here agrees
    # with the section name added to initial-config.yaml.
    service_config: dict[str, Any] = {}
    service_config["resource_monitor"] = {"process_memory": True}

    # The monitor wraps the whole service lifetime; instrumentation is opt-in
    # via the CHIA_INSTRUMENT_<SERVICE> environment variable.
    with ResourceMonitor.managed(
        log=log, config=ResourceMonitorConfiguration.create(service_config=service_config)
    ) as resource_monitor:
        with maybe_manage_task_instrumentation(
            enable=os.environ.get(f"CHIA_INSTRUMENT_{SERVICE_NAME.upper()}") is not None
        ):
            return async_run(coro=async_main(root_path=root_path, resource_monitor=resource_monitor))


if __name__ == "__main__":
Expand Down
11 changes: 11 additions & 0 deletions chia/util/initial-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,9 @@ logging: &logging
log_syslog_host: "localhost" # Send logging messages to a remote or local Unix syslog
log_syslog_port: 514 # UDP port of the remote or local Unix syslog

resource_monitor: &resource_monitoring  # key matches service_config["resource_monitor"] in start_data_layer.py; anchor name kept for existing aliases
  process_memory: False

seeder:
# The fake full node used for crawling will run on this port.
port: 8444
Expand Down Expand Up @@ -130,6 +133,7 @@ seeder:
network_overrides: *network_overrides
selected_network: *selected_network
logging: *logging
resource_monitor: *resource_monitoring
# Crawler is its own standalone service within the seeder component
crawler:
start_rpc_server: True
Expand Down Expand Up @@ -158,6 +162,7 @@ harvester:
parallel_read: True

logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network

Expand Down Expand Up @@ -224,6 +229,7 @@ farmer:
# To send a share to a pool, a proof of space must have required_iters less than this number
pool_share_threshold: 1000
logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network

Expand Down Expand Up @@ -262,6 +268,7 @@ timelord:
host: *self_hostname
port: 8000
logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network
# Bluebox (sanitizing timelord):
Expand Down Expand Up @@ -436,6 +443,7 @@ full_node:
port: 8444
enable_private_networks: False
logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network

Expand Down Expand Up @@ -476,6 +484,7 @@ introducer:
# recent_peer_threshold seconds
recent_peer_threshold: 6000
logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network
dns_servers: *dns_servers
Expand Down Expand Up @@ -534,6 +543,7 @@ wallet:
log_sqlite_cmds: False

logging: *logging
resource_monitor: *resource_monitoring
network_overrides: *network_overrides
selected_network: *selected_network

Expand Down Expand Up @@ -648,6 +658,7 @@ data_layer:
enable_batch_autoinsert: True

logging: *logging
resource_monitor: *resource_monitoring

ssl:
private_crt: "config/ssl/data_layer/private_data_layer.crt"
Expand Down
Loading
Loading