Benchmarking simulation time on multiple grid sizes #40

@ufuk-cakir

Description

I want to benchmark the speed of the simulation at multiple grid sizes without agent interaction.
I was wondering what the correct way to do this is.

Currently I do the following, with the config mainly taken from the example configs.

import time
from typing import Any, Dict, Tuple

from simfire.sim.simulation import FireSimulation
from simfire.utils.config import Config


def build_config_dict(
    screen_size: Tuple[int, int], functional_terrain: bool = True
) -> Dict[str, Any]:
    H, W = screen_size
    cfg = {
        "area": {"screen_size": [H, W], "pixel_scale": 50},
        "display": {
            "rescale_factor": 1,
            "agent_size": 4,
            "fire_size": 2,
            "control_line_size": 2,
        },
        "simulation": {
            "update_rate": 1,
            "runtime": "1h", # to make sure the simulation runs long enoguh
            "headless": True,
            "draw_spread_graph": False,
            "record": False,
            "save_data": False,
            "data_type": "npy",
            "sf_home": "~/.simfire",
        },
        "mitigation": {"ros_attenuation": False},
        "operational": {
            "seed": None,
            "latitude": 37.5,
            "longitude": -120.0,
            "height": 2000,
            "width": 2000,
            "resolution": 30,
            "year": 2020,
        },
        "fire": {
            "fire_initial_position": {
                "type": "static",
                "static": {"position": (25, 25)},
            },
            "max_fire_duration": 5,
            "diagonal_spread": True,
        },
        "environment": {"moisture": 0.001},
        "wind": {
            "function": "perlin",
            "perlin": {
                "speed": {
                    "seed": 2345,
                    "scale": 400,
                    "octaves": 3,
                    "persistence": 0.7,
                    "lacunarity": 2.0,
                    "range_min": 7,
                    "range_max": 47,
                },
                "direction": {
                    "seed": 650,
                    "scale": 1500,
                    "octaves": 2,
                    "persistence": 0.9,
                    "lacunarity": 1.0,
                    "range_min": 0.0,
                    "range_max": 360.0,
                },
            },
        },
    }
    if functional_terrain:
        cfg["terrain"] = {
            "topography": {
                "type": "functional",
                "functional": {
                    "function": "perlin",
                    "perlin": {
                        "octaves": 3,
                        "persistence": 0.7,
                        "lacunarity": 2.0,
                        "seed": 827,
                        "range_min": 100.0,
                        "range_max": 300.0,
                    },
                },
            },
            "fuel": {
                "type": "functional",
                "functional": {"function": "chaparral", "chaparral": {"seed": 1113}},
            },
        }
    else:
        cfg["terrain"] = {
            "topography": {"type": "operational"},
            "fuel": {"type": "operational"},
        }
    return cfg


def _run_once(
    screen_size: Tuple[int, int],
    steps: int,
    update_agents_each_step: bool = False,
    force_functional_terrain: bool = True,
) -> Tuple[float, int]:
    cfg_dict = build_config_dict(
        screen_size, functional_terrain=force_functional_terrain
    )

    cfg_dict["simulation"]["runtime"] = f"{max(steps * 2, 1000)}m"
    cfg_dict["fire"]["max_fire_duration"] = max(
        int(cfg_dict["fire"].get("max_fire_duration", 0)), steps * 2
    )

    sim = FireSimulation(Config(config_dict=cfg_dict))

    # warm-up: run one step outside the timed region
    s0 = sim.elapsed_steps
    sim.run(1)
    warmup_steps = sim.elapsed_steps - s0

    start = time.perf_counter()
    if update_agents_each_step:
        H, W = sim.config.area.screen_size
        for i in range(steps):
            if not sim.active:  # stop if the simulation has ended
                break
            c0 = (5 + i) % W
            r0 = (5 + i) % H
            c1 = (6 + i) % W
            r1 = (6 + i) % H
            sim.update_agent_positions([(c0, r0, 0), (c1, r1, 1)])
            sim.run(1)
    else:
        print(f"Running {steps} steps without agent updates...")
        sim.run(steps)
    end = time.perf_counter()
    actual_steps = max(0, sim.elapsed_steps - warmup_steps)
    print(f"Completed {actual_steps} steps")
    print(f"Elapsed time: {end - start:.3f}s")
    return (end - start), actual_steps
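
For reference, a driver along these lines can loop over the grid sizes (`benchmark_grids` is an illustrative helper of mine, not a simfire API):

def benchmark_grids(grid_sizes, steps: int = 200) -> Dict[Tuple[int, int], float]:
    """Run _run_once for each (H, W) size and report throughput in steps/second."""
    results: Dict[Tuple[int, int], float] = {}
    for size in grid_sizes:
        elapsed, actual_steps = _run_once(size, steps)
        rate = actual_steps / elapsed if elapsed > 0 else 0.0
        results[size] = rate
        print(f"{size[0]}x{size[1]}: {rate:.1f} steps/s")
    return results


benchmark_grids([(64, 64), (128, 128), (256, 256)])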

Running the simulation on a 128x128 grid for 200 steps without agent interaction takes about 60 seconds.
Is this reasonable? And is this the correct way to benchmark simulation time?
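
For extra context, here is a per-step timing sketch I put together (illustrative only; it reuses build_config_dict and applies the same overrides as _run_once) to check whether the cost of a single step grows as the fire spreads:

import statistics


def _time_each_step(screen_size: Tuple[int, int], steps: int) -> None:
    cfg = build_config_dict(screen_size)
    # same overrides as in _run_once so the sim does not end early
    cfg["simulation"]["runtime"] = f"{max(steps * 2, 1000)}m"
    cfg["fire"]["max_fire_duration"] = max(cfg["fire"]["max_fire_duration"], steps * 2)
    sim = FireSimulation(Config(config_dict=cfg))
    sim.run(1)  # warm-up step, excluded from timing

    durations = []
    for _ in range(steps):
        if not sim.active:
            break
        t0 = time.perf_counter()
        sim.run(1)
        durations.append(time.perf_counter() - t0)

    if durations:
        print(f"median step: {statistics.median(durations) * 1000:.2f} ms")
        print(f"max step:    {max(durations) * 1000:.2f} ms")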
