Migrate to pyenergyplus #541
Changes from 19 commits
GitHub Actions workflow:

```diff
@@ -21,7 +21,7 @@ jobs:
       - name: Install Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
+          python-version: "3.11"

       - name: Run pre-commit
         uses: pre-commit/[email protected]
@@ -39,7 +39,7 @@ jobs:
       - name: Install Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
+          python-version: "3.11"

       - name: Install poetry
         uses: abatilo/actions-poetry@v3
@@ -92,7 +92,7 @@ jobs:
           GIT_COMMIT: ${{ github.sha }}
         run: |
           printenv
-          docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d worker mongo redis minio mc goaws
+          docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d worker mongo redis minio mc

       - name: Dump docker logs before tests
         uses: jwalton/gh-docker-logs@v2
@@ -117,7 +117,7 @@ jobs:
       - name: Install Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
+          python-version: "3.11"

       - name: Install poetry
         uses: abatilo/actions-poetry@v3
```
AlfalfaAPI (JavaScript):

```diff
@@ -92,6 +92,11 @@ class AlfalfaAPI {
     return await getHashValue(this.redis, run.ref_id, "sim_time");
   };

+  getRunLog = async (run) => {
+    const log_lines = await this.redis.lRange(`run:${run.ref_id}:log`, -100, -1);
```
Collaborator: nice. I like the simple design of streaming the log into redis. is this new or is only the api endpoint new? I guess I'll find out as I go through this PR. The only question that comes to mind is what might the load look like as the scale gets big? Redis is powerful, but I sense these logs could get verbose. Nevertheless, good stuff, and if there is a performance impact I'm sure it can be mitigated by filtering the logs or something.

Member (Author): Good question.
```diff
+    return log_lines.join("\n");
+  };
+
   getPointsByRun = async (run) => {
     const pointsCursor = this.points.find({ run: run._id });
     return Promise.resolve(pointsCursor.toArray());
```
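On the load question above: the producing side can keep the list bounded as it streams. Here is a minimal Python sketch of a logging handler that pushes into the same `run:{ref_id}:log` key the diff reads from; the handler class, trim size, and pipeline usage are illustrative assumptions, not Alfalfa's actual worker code:

```python
import logging
import redis

class RedisListHandler(logging.Handler):
    """Hypothetical handler that streams formatted log records into a
    capped Redis list, matching the run:{ref_id}:log key read by getRunLog."""

    def __init__(self, client: redis.Redis, run_ref_id: str, max_lines: int = 1000):
        super().__init__()
        self.client = client
        self.key = f"run:{run_ref_id}:log"
        self.max_lines = max_lines

    def emit(self, record: logging.LogRecord) -> None:
        # RPUSH appends the line, LTRIM keeps only the newest max_lines,
        # so memory stays bounded however verbose the simulation gets.
        pipe = self.client.pipeline()
        pipe.rpush(self.key, self.format(record))
        pipe.ltrim(self.key, -self.max_lines, -1)
        pipe.execute()

# Usage sketch:
# logger = logging.getLogger("alfalfa_worker")
# logger.addHandler(RedisListHandler(redis.Redis(), run_ref_id="abc123"))
```

With a trim on every write, the read side (`lRange(..., -100, -1)`) always sees recent history without the list growing unbounded.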
```diff
@@ -126,7 +131,8 @@ class AlfalfaAPI {
     const pointDict = {
       id: point.ref_id,
       name: point.name,
-      type: point.point_type
+      type: point.point_type,
+      units: point.units
     };
     return pointDict;
   };
```
```diff
@@ -197,7 +203,7 @@ class AlfalfaAPI {
     const { startDatetime, endDatetime, timescale, realtime, externalClock } = data;

-    const job = `alfalfa_worker.jobs.${sim_type === "MODELICA" ? "modelica" : "openstudio"}.StepRun`;
+    const job = `alfalfa_worker.jobs.${sim_type === "MODELICA" ? "modelica" : "openstudio"}.step_run.StepRun`;
```
Collaborator: oh I see, now that I'm further along in the review. This corresponds to your module structure.
```diff
     const params = {
       run_id: run.ref_id,
       start_datetime: startDatetime,
```
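The new job strings now carry the module (`step_run`, `create_run`) as well as the class name, which suggests the dispatcher resolves them as dotted import paths. A minimal Python sketch of that kind of resolution, assuming an importlib-style lookup (the function name is illustrative, not the actual Dispatcher code):

```python
import importlib

def resolve_job(job_path: str):
    """Resolve a dotted job string such as
    'alfalfa_worker.jobs.openstudio.step_run.StepRun' to its class."""
    module_path, class_name = job_path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# Usage sketch:
# job_class = resolve_job("alfalfa_worker.jobs.openstudio.step_run.StepRun")
# job = job_class(run_id="abc123")
```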
```diff
@@ -297,7 +303,9 @@ class AlfalfaAPI {
   createRunFromModel = async (model) => {
     const runId = uuidv1();
-    const job = `alfalfa_worker.jobs.${model.model_name.endsWith(".fmu") ? "modelica" : "openstudio"}.CreateRun`;
+    const job = `alfalfa_worker.jobs.${
+      model.model_name.endsWith(".fmu") ? "modelica" : "openstudio"
+    }.create_run.CreateRun`;
     const params = {
       model_id: model.ref_id,
       run_id: runId
```
Worker Dockerfile:

```diff
@@ -1,4 +1,4 @@
-FROM ghcr.io/nrel/alfalfa-dependencies:3.1.0 AS base
+FROM ghcr.io/nrel/alfalfa-dependencies:prepare_080 AS base

 ENV HOME=/alfalfa
@@ -21,10 +21,7 @@ ENV PYTHONPATH="${HOME}:${PYTHONPATH}"

-COPY ./alfalfa_worker ${HOME}/alfalfa_worker
-
-RUN pip3.8 install virtualenv \
-    && pip3.8 install \
-    scipy \
-    symfit
```
Member (Author): The worker is no longer providing builtin python dependencies outside of what base alfalfa needs.

Member (Author): This is partially due to removing the

Collaborator: So in other words, if someone has an EnergyPlus model with Python EMS that uses third party Python modules, they're out of luck with Alfalfa. Is that right? It seems reasonable, as Alfalfa cannot anticipate every module that someone might want to use. Can we say that Alfalfa supports any module that comes bundled with EnergyPlus?

Member (Author): Not quite. https://github.com/NREL/alfalfa/wiki/How-to-Migrate-EnergyPlus-Python-Plugins you can provide a requirements.txt.

Member: Yes, this is staying the same, correct? The E+ model with Python EMS + 3rd party Python modules just also needs to take responsibility for that requirements.txt. This makes sense to me and I think it's important to continue to support this in Alfalfa.
```diff
+COPY ./alfalfa_worker /alfalfa/alfalfa_worker

 COPY ./deploy /alfalfa/deploy
 COPY ./deploy/wait-for-it.sh /usr/local/wait-for-it.sh
```
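Following the thread above, a model that ships Python EMS with third-party dependencies declares them itself via a bundled requirements.txt. A minimal Python sketch of how a worker could install such a file before a simulation starts; the directory layout and function name are assumptions for illustration, not Alfalfa's actual implementation:

```python
import subprocess
import sys
from pathlib import Path

def install_run_requirements(run_dir: Path) -> None:
    """If the uploaded model bundles a requirements.txt, install it into
    the worker's interpreter before the simulation starts."""
    requirements = run_dir / "requirements.txt"
    if requirements.exists():
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "-r", str(requirements)]
        )

# Usage sketch:
# install_run_requirements(Path("/runs/abc123"))
```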
Worker entrypoint (Python):

```diff
@@ -3,14 +3,22 @@
 import os
 import sys
 import traceback
+from logging import StreamHandler, basicConfig
 from pathlib import Path

 # Determine which worker to load based on the QUEUE.
 # This may be temporary for now, not sure on how else
 # to determine which worker gets launched
 from alfalfa_worker.dispatcher import Dispatcher
+from alfalfa_worker.lib.constants import DATETIME_FORMAT

 if __name__ == '__main__':

+    basicConfig(level=os.environ.get("LOGLEVEL", "INFO"),
```
Member (Author): Output logs at the level given by the LOGLEVEL environment variable.
```diff
+                handlers=[StreamHandler(sys.stdout)],
+                format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
+                datefmt=DATETIME_FORMAT)

     try:
         workdir = Path(os.environ.get('RUN_DIR', '/runs'))
         dispatcher = Dispatcher(workdir)
```
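With this root configuration in place, any module logger in the worker inherits the stdout handler and format string. A small illustrative example of what a log call then yields; the logger name and the rendered timestamp (which depends on DATETIME_FORMAT) are assumptions:

```python
from logging import getLogger

# Module loggers pick up the root configuration set by basicConfig above.
logger = getLogger("alfalfa_worker.jobs.openstudio.step_run")
logger.info("Starting simulation")
# Example output, per the format string above:
# 2024-01-01 00:00:00 - alfalfa_worker.jobs.openstudio.step_run - INFO: Starting simulation
```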
Modelica CreateRun job (Python):

```diff
@@ -1,9 +1,4 @@
-import json
-import os
 from pathlib import Path
-from uuid import uuid4
-
-from pyfmi import load_fmu

 from alfalfa_worker.lib.enums import RunStatus, SimType
 from alfalfa_worker.lib.job import Job
@@ -19,7 +14,6 @@ def __init__(self, model_id, run_id=None):
         # Define FMU specific attributes
         self.upload_fmu: Path = self.dir / model_name
         self.fmu_path = self.dir / 'model.fmu'
-        self.fmu_json = self.dir / 'tags.json'
         self.model_name = model_name

         # Needs to be set after files are uploaded / parsed.
@@ -34,99 +28,11 @@ def exec(self):
         """
         self.logger.info("add_fmu for {}".format(self.run.ref_id))

-        # Create the FMU tags (no longer external now that python2 is deprecated)
-        self.create_tags()
```
Collaborator: So the key change with all of these deleted lines of code, is that the FMU is supposed to come with the tags metadata baked into the FMU's resources directory. Is that correct? This seems to be aligned with the BOPTEST convention.
```diff
-        # insert tags into db
-        self.insert_fmu_tags()
         self.upload_fmu.rename(self.fmu_path)

     def validate(self) -> None:
         assert (self.dir / 'model.fmu').exists(), "model file not created"
-        assert (self.dir / 'tags.json').exists(), "tags file not created"

     def cleanup(self) -> None:
         super().cleanup()
         self.set_run_status(RunStatus.READY)

-    def get_site_ref(self, haystack_json):
-        """
-        Find the site given the haystack JSON file. Remove 'r:' from string.
-        :param haystack_json: json serialized Haystack document
-        :return: site_ref: id of site
-        """
-        site_ref = ''
-        with open(haystack_json) as json_file:
-            data = json.load(json_file)
-            for entity in data:
-                if 'site' in entity:
-                    if entity['site'] == 'm:':
-                        site_ref = entity['id'].replace('r:', '')
-                        break
-        return site_ref
-
-    def insert_fmu_tags(self):
-        with open(self.fmu_json, 'r') as f:
-            data = f.read()
-        points_json = json.loads(data)
-
-        self.run_manager.add_site_to_mongo(points_json, self.run)
-
-    def create_tags(self):
-        # 1.0 setup the inputs
-        fmu = load_fmu(self.upload_fmu)
-
-        # 2.0 get input/output variables from the FMU
-        # causality = 1 is parameter, 2 is input, 3 is output
-        input_names = fmu.get_model_variables(causality=2).keys()
-        output_names = fmu.get_model_variables(causality=3).keys()
-
-        # 3.0 add site tagging
-        tags = []
-
-        fmu_upload_name = os.path.basename(self.model_name)  # without directories
-        fmu_upload_name = os.path.splitext(fmu_upload_name)[0]  # without extension
-
-        # TODO: Figure out how to find geo_city
-        sitetag = {
-            "dis": "s:%s" % fmu_upload_name,
-            "id": "r:%s" % self.run.ref_id,
-            "site": "m:",
-            "datetime": "s:",
-            "simStatus": "s:Stopped",
-            "simType": "s:fmu",
-            "siteRef": "r:%s" % self.run.ref_id
-        }
-        tags.append(sitetag)
-
-        # 4.0 add input tagging
-        for var_input in input_names:
-            if not var_input.endswith("_activate"):
-                tag_input = {
-                    "id": "r:%s" % uuid4(),
-                    "dis": "s:%s" % var_input,
-                    "siteRef": "r:%s" % self.run.ref_id,
-                    "point": "m:",
-                    "writable": "m:",
-                    "writeStatus": "s:disabled",
-                    "kind": "s:Number",
-                }
-                tags.append(tag_input)
-                tag_input = {}
-
-        # 5.0 add output tagging
-        for var_output in output_names:
-            tag_output = {
-                "id": "r:%s" % uuid4(),
-                "dis": "s:%s" % var_output,
-                "siteRef": "r:%s" % self.run.ref_id,
-                "point": "m:",
-                "cur": "m:",
-                "curVal": "n:",
-                "curStatus": "s:disabled",
-                "kind": "s:Number",
-            }
-            tags.append(tag_output)
-
-        # 6.0 write tags to the json file
-        with open(self.fmu_json, 'w') as outfile:
-            json.dump(tags, outfile)
```
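Per the review comment above, the tag metadata is now expected to ship inside the FMU itself rather than being generated by the worker. Since an FMU is a zip archive, the resources directory can be read directly; a minimal Python sketch (the member path `resources/tags.json` is an assumption for illustration, not necessarily the file Alfalfa reads):

```python
import json
import zipfile
from pathlib import Path

def read_fmu_tags(fmu_path: Path, member: str = "resources/tags.json") -> list:
    """Read tag metadata baked into an FMU. FMUs are zip archives, so a
    file in the resources directory can be read without extracting the
    whole model. The member path is illustrative; the actual convention
    (e.g. per BOPTEST) may differ."""
    with zipfile.ZipFile(fmu_path) as fmu:
        with fmu.open(member) as f:
            return json.load(f)

# Usage sketch:
# tags = read_fmu_tags(Path("model.fmu"))
```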
This would allow job/run logs to be accessed from the web api.
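For reference, a hypothetical client-side sketch of fetching a run's log through such a web API endpoint. The URL path and port are assumptions; only the behavior of returning the last 100 log lines joined by newlines is taken from the getRunLog diff above:

```python
import requests

# Hypothetical route exposing AlfalfaAPI.getRunLog; the actual endpoint
# path may differ. getRunLog returns the last 100 lines joined by "\n".
run_id = "abc123"  # illustrative run ref_id
response = requests.get(f"http://localhost/api/runs/{run_id}/log")
response.raise_for_status()
print(response.text)
```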