Skip to content

Commit 80a2a73

Browse files
authored
Merge pull request #148 from Soft-CPS-Research-Group/master
Fixes identified issue #145, adds tests, and adds toggles for rendering data for the UI
2 parents cb22c99 + a89ae93 commit 80a2a73

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

58 files changed

+420658
-217
lines changed

.gitignore

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,12 @@ ENV/
123123
env.bak/
124124
venv.bak/
125125
*env/
126+
# Virtual envs (stray names with tilde)
127+
venv~/
128+
**/venv~/
129+
130+
# Editor backup files (just in case)
131+
*~
126132

127133
# Spyder project settings
128134
.spyderproject
@@ -174,3 +180,7 @@ examples/*
174180

175181
# ignore folder
176182
.ignore
183+
184+
# Simulation outputs
185+
render_logs/
186+
results/

README.md

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,4 +16,19 @@ pip install CityLearn
1616
```
1717

1818
## Documentation
19-
Refer to the [docs](https://intelligent-environments-lab.github.io/CityLearn/).
19+
Refer to the [docs](https://intelligent-environments-lab.github.io/CityLearn/).
20+
21+
## CityLearn UI
22+
23+
CityLearn UI is a visual dashboard for exploring simulation data generated by the CityLearn framework. It was developed to simplify the analysis of results from smart energy communities, district energy coordination, demand response (among other applications), allowing users to visually inspect building-level components, compare simulation KPIs, and create simulation schemas with ease.
24+
25+
The interface is available in two options:
26+
27+
* Web app: https://citylearn-ui.softcps.org (free hosted version — not recommended for sensitive/personal data)
28+
* Open-source code: https://github.com/SoftCPS/citylearn-ui
29+
30+
You can check a tutorial at the official CityLearn [website](https://intelligent-environments-lab.github.io/CityLearn/ui.html) or in the CityLearn UI repository README.
31+
32+
**Compatibility:** This version of the UI currently supports CityLearn v2.4 simulation data.
33+
34+
**Developed by:** SoftCPS, Software for Cyber-Physical Systems (SoftCPS) research group (ISEP, Portugal) in collaboration with the Intelligent Environments Lab, University of Texas at Austin.

citylearn/building.py

Lines changed: 366 additions & 42 deletions
Large diffs are not rendered by default.

citylearn/citylearn.py

Lines changed: 251 additions & 107 deletions
Large diffs are not rendered by default.

citylearn/data.py

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -439,13 +439,16 @@ def __init__(
439439
if time_delta < 0:
440440
time_delta += 1440
441441

442+
base_step_seconds = None
443+
444+
if time_delta is not None:
445+
# Convert dataset spacing to seconds (guard against zero/negative values)
446+
candidate = max(1, time_delta * 60)
447+
base_step_seconds = candidate
448+
442449
time_step_ratio = (
443-
# Computes the ratio of the current time step (in seconds) relative to:
444-
# - 1 hour (3600s) if time_delta ≤ 1 hour, OR
445-
# - time_delta (converted to seconds) if time_delta > 1 hour
446-
# Returns None if either time_delta or seconds_per_time_step is missing
447-
seconds_per_time_step / max(3600, time_delta * 60)
448-
if time_delta is not None and seconds_per_time_step
450+
seconds_per_time_step / base_step_seconds
451+
if seconds_per_time_step and base_step_seconds
449452
else None
450453
)
451454
time_step_ratios.append(time_step_ratio)
@@ -814,4 +817,4 @@ def parse_profile(profile_str):
814817
return np.array([], dtype=float)
815818

816819

817-
self.load_profile = np.array([parse_profile(lp) for lp in load_profile], dtype=object)
820+
self.load_profile = np.array([parse_profile(lp) for lp in load_profile], dtype=object)

citylearn/energy_model.py

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,10 @@
55
from typing import Any, Dict, Iterable, List, Mapping, Tuple, Union
66
import numpy as np
77
import pandas as pd
8-
from PySAM import Pvwattsv8
8+
try:
9+
from PySAM import Pvwattsv8
10+
except ModuleNotFoundError: # pragma: no cover - optional dependency
11+
Pvwattsv8 = None
912
from citylearn.base import Environment, EpisodeTracker
1013
from citylearn.data import DataSet, ZERO_DIVISION_PLACEHOLDER, EnergySimulation, WashingMachineSimulation
1114
np.seterr(divide='ignore', invalid='ignore')
@@ -536,7 +539,9 @@ def autosize(self, demand: float, epw_filepath: Union[Path, str], use_sample_tar
536539

537540
for i in range(tries):
538541
self._autosize_config = sizing_data.sample(1, random_state=random_seed + i).iloc[0].to_dict()
539-
model = Pvwattsv8.default('PVWattsNone')
542+
if Pvwattsv8 is None:
543+
raise ModuleNotFoundError('PySAM is required for PV sizing but is not installed.')
544+
model = Pvwattsv8.default('PVWattsNone')
540545
pv_nominal_power = self.autosize_config['nameplate_capacity_module_1']/1000.0
541546
model.SystemDesign.system_capacity = pv_nominal_power
542547
model.SystemDesign.dc_ac_ratio = self.autosize_config['inverter_loading_ratio']
@@ -660,11 +665,11 @@ def energy_init(self) -> float:
660665
return max(0.0, self.__soc[self.time_step]*self.capacity*(1 - self.loss_coefficient))
661666
return max(0.0, self.__soc[self.time_step - 1]*self.capacity*(1 - self.loss_coefficient))
662667

663-
@property
664-
def energy_balance(self) -> np.ndarray:
665-
r"""Charged/discharged energy time series in [kWh]."""
666-
667-
return self.__energy_balance * self.time_step_ratio
668+
@property
669+
def energy_balance(self) -> np.ndarray:
670+
r"""Charged/discharged energy time series in [kWh]."""
671+
672+
return self.__energy_balance
668673

669674
@property
670675
def round_trip_efficiency(self) -> float:
@@ -756,10 +761,11 @@ def set_energy_balance(self, energy: float, energy_init:float) -> float:
756761
actual energy charged/discharged irrespective of what is determined in the step function after taking into account storage design limits
757762
e.g. maximum power input/output, capacity.
758763
"""
759-
energy = energy * self.time_step_ratio
760-
energy -= energy_init
761-
energy_balance = energy/self.round_trip_efficiency if energy >= 0 else energy*self.round_trip_efficiency
762-
return energy_balance
764+
delta_energy = energy - energy_init
765+
if delta_energy >= 0:
766+
return delta_energy / self.round_trip_efficiency
767+
768+
return delta_energy * self.round_trip_efficiency
763769

764770
def autosize(self, demand: Iterable[float], safety_factor: Union[float, Tuple[float, float]] = None) -> float:
765771
r"""Autosize `capacity`.
@@ -1389,4 +1395,4 @@ def render_simulation_end_data(self) -> dict:
13891395
return {
13901396
"simulation_name": self.name if self.name else "WashingMachineSimulation",
13911397
"data": time_steps
1392-
}
1398+
}

citylearn/reward_function.py

Lines changed: 73 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,11 @@ class RewardFunction:
1818
Other keyword arguments for custom reward calculation.
1919
"""
2020

21-
def __init__(self, env_metadata: Mapping[str, Any], exponent: float = None, **kwargs):
22-
self.env_metadata = env_metadata
23-
self.exponent = exponent
21+
def __init__(self, env_metadata: Mapping[str, Any], exponent: float = None, **kwargs):
22+
penalty_coefficient = kwargs.pop('charging_constraint_penalty_coefficient', None)
23+
self.env_metadata = env_metadata
24+
self.exponent = exponent
25+
self.charging_constraint_penalty_coefficient = penalty_coefficient
2426

2527
@property
2628
def env_metadata(self) -> Mapping[str, Any]:
@@ -41,8 +43,19 @@ def exponent(self) -> float:
4143
return self.__exponent
4244

4345
@exponent.setter
44-
def exponent(self, exponent: float):
45-
self.__exponent = 1.0 if exponent is None else exponent
46+
def exponent(self, exponent: float):
47+
self.__exponent = 1.0 if exponent is None else exponent
48+
49+
@property
50+
def charging_constraint_penalty_coefficient(self) -> float:
51+
return getattr(self, '_charging_constraint_penalty_coefficient', 1.0)
52+
53+
@charging_constraint_penalty_coefficient.setter
54+
def charging_constraint_penalty_coefficient(self, coefficient: float):
55+
if coefficient is None:
56+
self._charging_constraint_penalty_coefficient = 1.0
57+
else:
58+
self._charging_constraint_penalty_coefficient = float(coefficient)
4659

4760
def reset(self):
4861
"""Use to reset variables at the start of an episode."""
@@ -379,40 +392,56 @@ class Electric_Vehicles_Reward_Function(MARL):
379392
Only affects EV-related behavior; other building logic comes from the superclass.
380393
"""
381394

382-
def __init__(self, env_metadata: Mapping[str, Any], weights: Mapping[str, float] = None):
383-
super().__init__(env_metadata)
384-
385-
# Default tunable weights for EV-related reward components
386-
self.weights = weights or {
387-
"no_car_charging": -5.0,
388-
"battery_limits": -2.0,
389-
"soc_impossible": -10.0,
390-
"soc_under": -5.0,
391-
"close_soc": 10.0,
392-
"self_ev_consumption": 5.0,
393-
"extra_self_production": 5.0,
394-
}
395-
396-
def calculate(self, observations: List[Mapping[str, Union[int, float, dict]]]) -> List[float]:
397-
current_reward = super().calculate(observations)
398-
reward_list = []
399-
400-
for i, o in enumerate(observations):
401-
ev_info = o.get("electric_vehicles_chargers_dict", {})
402-
if not ev_info:
403-
reward=0
404-
else:
405-
if self.central_agent:
406-
reward_value = current_reward[0] if isinstance(current_reward, list) else current_reward
407-
reward = self.calculate_ev_penalty(o, reward_value)
408-
else:
409-
reward = self.calculate_ev_penalty(o, current_reward[i])
410-
411-
reward_list.append(reward)
412-
413-
total_reward = [sum(reward_list)] if self.central_agent else reward_list
414-
LOGGER.info(f"Calculated EV reward: {total_reward}")
415-
return total_reward
395+
def __init__(self, env_metadata: Mapping[str, Any], weights: Mapping[str, float] = None):
396+
super().__init__(env_metadata)
397+
398+
# Default tunable weights for EV-related reward components
399+
self.weights = weights or {
400+
"no_car_charging": -5.0,
401+
"battery_limits": -2.0,
402+
"soc_impossible": -10.0,
403+
"soc_under": -5.0,
404+
"close_soc": 10.0,
405+
"self_ev_consumption": 5.0,
406+
"extra_self_production": 5.0,
407+
}
408+
self._last_base_reward_total = 0.0
409+
self._last_penalty_total = 0.0
410+
self._last_base_rewards_per_building: List[float] = []
411+
self._last_penalties_per_building: List[float] = []
412+
413+
def calculate(self, observations: List[Mapping[str, Union[int, float, dict]]]) -> List[float]:
414+
current_reward = super().calculate(observations)
415+
reward_list = []
416+
base_rewards = []
417+
penalty_values = []
418+
419+
for i, o in enumerate(observations):
420+
ev_info = o.get("electric_vehicles_chargers_dict", {})
421+
if not ev_info:
422+
reward = 0
423+
else:
424+
if self.central_agent:
425+
reward_value = current_reward[0] if isinstance(current_reward, list) else current_reward
426+
reward = self.calculate_ev_penalty(o, reward_value)
427+
else:
428+
reward = self.calculate_ev_penalty(o, current_reward[i])
429+
430+
base_rewards.append(reward)
431+
violation = float(o.get("charging_constraint_violation_kwh", 0.0) or 0.0)
432+
penalty = violation * self.charging_constraint_penalty_coefficient if violation > 0.0 else 0.0
433+
penalty_values.append(penalty)
434+
reward -= penalty
435+
436+
reward_list.append(reward)
437+
438+
self._last_base_rewards_per_building = base_rewards
439+
self._last_penalties_per_building = penalty_values
440+
total_reward = [sum(reward_list)] if self.central_agent else reward_list
441+
self._last_base_reward_total = sum(base_rewards) if self.central_agent else base_rewards
442+
self._last_penalty_total = sum(penalty_values) if self.central_agent else penalty_values
443+
LOGGER.info(f"Calculated EV reward: {total_reward}")
444+
return total_reward
416445

417446
def calculate_ev_penalty(self, o: Mapping[str, Union[int, float, dict]], current_reward: float) -> float:
418447

@@ -437,7 +466,9 @@ def calculate_ev_penalty(self, o: Mapping[str, Union[int, float, dict]], current
437466
soc_now = data.get("battery_soc")
438467
capacity = data.get("battery_capacity")
439468
min_capacity = data.get("min_capacity")
440-
last_charged_kwh = data.get("last_charged_kwh", 0)
469+
last_charged_kwh = data.get("last_charged_kwh", 0)
470+
if last_charged_kwh is None:
471+
last_charged_kwh = 0.0
441472
required_soc = data.get("required_soc")
442473
hours_until_departure = data.get("hours_until_departure", 0)
443474
max_charging_power = data.get("max_charging_power", 0)
@@ -446,8 +477,8 @@ def calculate_ev_penalty(self, o: Mapping[str, Union[int, float, dict]], current
446477
if soc_prev is None or soc_now is None or capacity is None:
447478
raise ValueError("Something went wrong, this values should not be none")
448479
continue
449-
# Battery limits
450-
current_energy = soc_prev * capacity + last_charged_kwh
480+
# Battery limits
481+
current_energy = soc_prev * capacity + last_charged_kwh
451482
if current_energy > capacity or current_energy < min_capacity:
452483
contributions["battery_limits"] += self.weights["battery_limits"] * penalty_multiplier
453484

citylearn/tests/test_alignment.py

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
"""Tests for timestep alignment and EV charger integration."""
2+
3+
from pathlib import Path
4+
import math
5+
import numpy as np
6+
import pytest
7+
8+
from citylearn.citylearn import CityLearnEnv
9+
10+
11+
DATASET = Path(__file__).resolve().parents[2] / 'data/datasets/citylearn_challenge_2022_phase_all_plus_evs/schema.json'
12+
13+
14+
def _finite(value):
15+
if isinstance(value, (list, tuple, np.ndarray)):
16+
return np.all(np.isfinite(value))
17+
18+
return math.isfinite(float(value))
19+
20+
21+
def test_timestep_and_chargers():
22+
env = CityLearnEnv(str(DATASET), central_agent=True)
23+
24+
try:
25+
obs, _ = env.reset()
26+
zeros = np.zeros(env.action_space[0].shape[0], dtype='float32')
27+
28+
obs, r0, term, trunc, _ = env.step([zeros])
29+
assert env.time_step == 1
30+
assert _finite(r0)
31+
32+
names = env.action_names[0]
33+
charger_index = next((i for i, name in enumerate(names) if name.startswith('electric_vehicle_storage_')), None)
34+
35+
if charger_index is None:
36+
pytest.skip('Dataset does not expose EV storage actions.')
37+
38+
actions = np.zeros_like(zeros)
39+
actions[charger_index] = 0.1
40+
41+
obs, r1, term, trunc, _ = env.step([actions])
42+
assert env.time_step == 2
43+
assert _finite(r1)
44+
45+
building = env.buildings[0]
46+
t = env.time_step - 1
47+
lhs = building.net_electricity_consumption[t]
48+
rhs = (
49+
building.cooling_electricity_consumption[t]
50+
+ building.heating_electricity_consumption[t]
51+
+ building.dhw_electricity_consumption[t]
52+
+ building.non_shiftable_load_electricity_consumption[t]
53+
+ building.electrical_storage_electricity_consumption[t]
54+
+ building.solar_generation[t]
55+
+ building.chargers_electricity_consumption[t]
56+
+ building.washing_machines_electricity_consumption[t]
57+
)
58+
assert abs(lhs - rhs) < 1e-4
59+
60+
finally:
61+
env.close()

0 commit comments

Comments
 (0)