diff --git a/README.md b/README.md
index 7b860b42..77eac3ed 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ data = osyris.Dataset(8, scale="au", path="data").load()
osyris.histogram2d(data["hydro"]["density"], data["hydro"]["B_field"],
norm="log", loglog=True)
```
-![hist2d](https://osyris.readthedocs.io/en/stable/_images/plotting_3_1.png)
+![hist2d](https://osyris.readthedocs.io/en/stable/_images/plotting_histograms_13_1.png)
Create a 2D gas density slice 2000 au wide through the plane normal to ``z``,
with velocity vectors overlayed as arrows, once again using ``layers``:
@@ -45,7 +45,7 @@ osyris.plane({"data": data["hydro"]["density"], "norm": "log"}, # layer 1
origin=center,
direction="z")
```
-![map2d](https://osyris.readthedocs.io/en/stable/_images/plotting_7_1.png)
+![map2d](https://osyris.readthedocs.io/en/stable/_images/plotting_maps_23_1.png)
## Have a problem or need a new feature?
diff --git a/docs/basics.ipynb b/docs/basics.ipynb
index 11f8683d..e25a6bda 100644
--- a/docs/basics.ipynb
+++ b/docs/basics.ipynb
@@ -148,7 +148,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Simple cut plane\n",
+ "## Simple 2D map\n",
"\n",
"The goal here is to create a 2D gas density slice, 2000 au wide, through the plane normal to `z`,\n",
"that passes through the center of the young star forming system.\n",
@@ -172,7 +172,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Once we have this center coordinate, we can use it as the origin for our cut plane:"
+ "Once we have this center coordinate, we can use it as the origin for our cut plane and create a map:"
]
},
{
@@ -181,24 +181,24 @@
"metadata": {},
"outputs": [],
"source": [
- "osyris.plane({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
- " dx=2000 * osyris.units('au'),\n",
- " origin=center,\n",
- " direction=\"z\")"
+ "osyris.map({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
+ " dx=2000 * osyris.units('au'),\n",
+ " origin=center,\n",
+ " direction=\"z\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "where the first argument is the variable to display, `dx` is the extent of the vieweing region,\n",
- "and `direction` is the normal to the slice plane."
+ "where the first argument is the variable to display, `dx` is the extent of the viewing region,\n",
+ "and `direction` is the normal to the slicing plane."
]
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -212,9 +212,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.10"
+ "version": "3.9.5"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/docs/installation.ipynb b/docs/installation.ipynb
index 32038b8b..4b80f1d6 100644
--- a/docs/installation.ipynb
+++ b/docs/installation.ipynb
@@ -22,6 +22,40 @@
"$ pip install osyris"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you have already installed `osyris` in the past, but would like to upgrade, use"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "$ pip install osyris --upgrade"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "
\n",
+ "\n",
+ "**Warning**\n",
+ "\n",
+ "If you are upgrading from an old version to a version >= 2.6.0,\n",
+ "and you are getting some strange errors,\n",
+ "you may need to update your `config_osyris.py` configuration file in `/home/user/.osyris`.\n",
+ "\n",
+ "- If you had never touched the configuration file, it is safe to simply delete it (a new one will be created when importing `osyris`).\n",
+ "- If you had made changes to the configuration, the easiest is probably to move it to a new location/filename. Then import `osyris` and update the newly created file to incorporate the changes you had made previously.\n",
+ "\n",
+ "
"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -73,7 +107,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -87,7 +121,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.7"
+ "version": "3.9.5"
},
"nbsphinx": {
"execute": "never"
diff --git a/docs/loading_data.ipynb b/docs/loading_data.ipynb
index 3f4746f7..617f15b1 100644
--- a/docs/loading_data.ipynb
+++ b/docs/loading_data.ipynb
@@ -117,19 +117,21 @@
"metadata": {},
"outputs": [],
"source": [
- "osyris.plane({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
- " dx=1000 * osyris.units('au'),\n",
- " origin=data[\"amr\"][\"xyz\"][np.argmax(data[\"hydro\"][\"density\"]).values],\n",
- " direction='z')"
+ "osyris.map({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
+ " dx=1000 * osyris.units('au'),\n",
+ " origin=data[\"amr\"][\"xyz\"][np.argmax(data[\"hydro\"][\"density\"]).values],\n",
+ " direction='z')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Only loading certain variables\n",
+ "### Only loading certain groups or variables\n",
"\n",
- "It is also possible to skip a variable entirely by setting the key to `False` in the `select` dict:"
+ "It is also possible to select which groups to load.\n",
+ "The different groups are `'amr'`, `'hydro'`, `'grav'`, `'rt'`, and `'part'`.\n",
+ "For example, to load only the `amr` and `hydro` groups, use"
]
},
{
@@ -138,18 +140,21 @@
"metadata": {},
"outputs": [],
"source": [
- "data = osyris.Dataset(8, scale=\"au\", path=path).load(\n",
- " select={\"hydro\": {\"density\": False}})\n",
- "\"density\" in data[\"hydro\"]"
+ "data = osyris.Dataset(8, scale=\"au\", path=path).load(['amr', 'hydro'])\n",
+ "data.keys()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Entire groups of variables can also be skipped.\n",
- "The different groups are `'amr'`, `'hydro'`, `'grav'` (or `'gravity'`), `'rt'`, and `'part'`.\n",
- "Loading all but the hydro variables can be achieved via"
+ "It is also possible to load or skip only a single variable in a group,\n",
+ "by setting the key to `True` or `False` in the `select` dict.\n",
+ "\n",
+ "The convention is:\n",
+ "\n",
+ "- If at least one key in a group is set to `True`, all unspecified variables in the group are skipped\n",
+ "- If a key is set to `False` in a group, all other variables are loaded"
]
},
{
@@ -159,8 +164,19 @@
"outputs": [],
"source": [
"data = osyris.Dataset(8, scale=\"au\", path=path).load(\n",
- " select={\"hydro\": False})\n",
- "data"
+ " select={\"hydro\": {\"density\": False}})\n",
+ "\"density\" in data[\"hydro\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data = osyris.Dataset(8, scale=\"au\", path=path).load(\n",
+ " select={\"hydro\": {\"temperature\": True, \"thermal_pressure\": True}})\n",
+ "data[\"hydro\"].keys()"
]
},
{
@@ -234,10 +250,10 @@
"metadata": {},
"outputs": [],
"source": [
- "osyris.plane({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
- " dx=2000 * osyris.units(\"au\"),\n",
- " origin=data[\"amr\"][\"xyz\"][np.argmax(data[\"hydro\"][\"density\"]).values],\n",
- " direction='z')"
+ "osyris.map({\"data\": data[\"hydro\"][\"density\"], \"norm\": \"log\"},\n",
+ " dx=2000 * osyris.units(\"au\"),\n",
+ " origin=data[\"amr\"][\"xyz\"][np.argmax(data[\"hydro\"][\"density\"]).values],\n",
+ " direction='z')"
]
},
{
@@ -254,11 +270,95 @@
"\n",
""
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Example: loading a region around a sink particle\n",
+ "\n",
+ "Combining some of the methods illustrated above,\n",
+ "we show here how to load only a small region 400 AU wide around a sink particle.\n",
+ "\n",
+ "We begin by loading only the sink particle data, by using `select='sink'` in the `load` method:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data = osyris.Dataset(8, scale=\"au\", path=path).load(select='sink')\n",
+ "data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `sink` data group contains the positions of the sink particles.\n",
+ "We wish to load the region around the first sink particle, which will be the center of our domain."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "center = data['sink']['xyz'][0:1]\n",
+ "dx = 200 * osyris.units('au')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We are now in a position to load the rest of the data (`amr`, `hydro`, etc.)\n",
+ "and perform a spatial selection based on the sink's position\n",
+ "(note how the loading below is only looking through 6 files, as opposed to 12 at the top of the notebook,\n",
+ "because it uses the knowledge from the Hilbert space-filling curve to skip CPUs that are not connected\n",
+ "to the domain of interest)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data.load(\n",
+ " select={\"amr\": {\"x\": lambda x : np.logical_and(x > center.x - dx, x < center.x + dx),\n",
+ " \"y\": lambda y : np.logical_and(y > center.y - dx, y < center.y + dx),\n",
+ " \"z\": lambda z : np.logical_and(z > center.z - dx, z < center.z + dx)}\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, we can make a map of the gas density around our sink particle\n",
+ "(note that no spatial limits are specified in the `map` call,\n",
+ "it is making a map using all the cells that have been loaded)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "osyris.map(data[\"hydro\"][\"density\"],\n",
+ " {\"data\": data[\"sink\"][\"xyz\"], \"mode\": \"scatter\", \"c\": \"white\"},\n",
+ " norm='log', direction=\"z\", origin=center[0])"
+ ]
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -272,9 +372,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.1"
+ "version": "3.9.5"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
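
Not part of the patch, but as a reader aid: a minimal sketch combining the selection features documented in this notebook (group selection via a string, per-variable `True`/`False` flags, and lambda-based spatial cuts). The output number `8` and `path="data"` are placeholders.

```python
import numpy as np
import osyris

# Placeholder output number and path; first load just the sink data, then
# reload only the gas density in a 400 au box around the first sink particle.
data = osyris.Dataset(8, scale="au", path="data").load(select="sink")
center = data["sink"]["xyz"][0:1]
dx = 200 * osyris.units("au")
data.load(select={
    "amr": {"x": lambda x: np.logical_and(x > center.x - dx, x < center.x + dx),
            "y": lambda y: np.logical_and(y > center.y - dx, y < center.y + dx),
            "z": lambda z: np.logical_and(z > center.z - dx, z < center.z + dx)},
    "hydro": {"density": True}})
data["hydro"].keys()
```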
diff --git a/setup.cfg b/setup.cfg
index 87acb1ee..effc72b8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = osyris
-version = 2.5.1
+version = 2.6.0
author = Neil Vaytet
author_email = neil.vaytet@esss.se
description = A package to visualize AMR data from the RAMSES code
diff --git a/src/osyris/config/defaults.py b/src/osyris/config/defaults.py
index 23e291ea..79e0d368 100644
--- a/src/osyris/config/defaults.py
+++ b/src/osyris/config/defaults.py
@@ -17,7 +17,7 @@
}
-def get_unit(string, ud, ul, ut):
+def get_unit(string, ud, ul, ut, scale):
density = ud * (ureg.g / (ureg.cm**3))
velocity = (ul / ut) * (ureg.cm / ureg.s)
@@ -29,6 +29,11 @@ def get_unit(string, ud, ul, ut):
length = ul * ureg.cm
mass = density * (length**3)
+ scaling = length
+ if scale is not None:
+ scale = ureg(scale)
+ scaling = (length.to(scale) / scale).magnitude * scale
+
ramses_units = {
'density': density,
'velocity': velocity,
@@ -67,10 +72,16 @@ def get_unit(string, ud, ul, ut):
'radiative_energy_1': energy,
'temperature': 1.0 * ureg.K,
'time': time,
- 'x': length,
- 'y': length,
- 'z': length,
- 'dx': length,
+ 'x': scaling,
+ 'y': scaling,
+ 'z': scaling,
+ 'xyz_x': scaling,
+ 'xyz_y': scaling,
+ 'xyz_z': scaling,
+ 'position_x': scaling,
+ 'position_y': scaling,
+ 'position_z': scaling,
+ 'dx': scaling,
'mass': mass
}
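
As a reader aid (not part of the patch): a minimal pint sketch of what the new `scale` argument to `get_unit` computes; the `unit_l` value below is hypothetical.

```python
import pint

ureg = pint.UnitRegistry()

# Hypothetical RAMSES length unit: 1 code unit = unit_l centimetres.
unit_l = 1.0e17
length = unit_l * ureg.cm

# Mirrors the new scaling computation above: re-express the code length unit
# in the requested spatial scale so that 'x', 'dx', 'position_x', etc. carry
# that scale directly.
scale = ureg("au")
scaling = (length.to(scale) / scale).magnitude * scale
print(scaling)  # ~6684.6 astronomical_unit (1 au ~ 1.496e13 cm)
```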
diff --git a/src/osyris/io/amr.py b/src/osyris/io/amr.py
index 11bac033..3c8ac0ff 100644
--- a/src/osyris/io/amr.py
+++ b/src/osyris/io/amr.py
@@ -4,7 +4,6 @@
from .hilbert import hilbert_cpu_list
from .reader import Reader, ReaderKind
from .. import config
-from .. import units
from . import utils
@@ -14,58 +13,17 @@ def __init__(self):
self.cpu_list = None
def initialize(self, meta, select):
- length_unit = config.get_unit("x", meta["unit_d"], meta["unit_l"],
- meta["unit_t"])
- if meta["scale"] is not None:
- scale = units(meta["scale"])
- scaling = (length_unit.to(scale) / scale).magnitude * scale
- else:
- scaling = length_unit
-
- scaling = utils.get_spatial_scaling(meta["unit_d"], meta["unit_l"],
- meta["unit_t"], meta["scale"])
-
- if select is False:
- meta["lmax"] = 0
- return
-
- # AMR grid variables
- self.variables.update({
- "level": {
- "read": True,
- "type": "i",
- "buffer": None,
- "pieces": {},
- "unit": 1.0 * units.dimensionless
- },
- "cpu": {
- "read": True,
- "type": "i",
- "buffer": None,
- "pieces": {},
- "unit": 1.0 * units.dimensionless
- },
- "dx": {
- "read": True,
- "type": "d",
- "buffer": None,
- "pieces": {},
- "unit": scaling
- }
- })
- self.variables.update({
- "xyz_{}".format(c): {
- "read": True,
- "type": "d",
- "buffer": None,
- "pieces": {},
- "unit": scaling
- }
- for c in "xyz"[:meta["ndim"]]
- })
+ self.initialized = False
+
+ descriptor = {"level": "i", "cpu": "i", "dx": "d"}
+ descriptor.update({"xyz_{}".format(c): "d" for c in "xyz"[:meta["ndim"]]})
+
+ self.descriptor_to_variables(descriptor=descriptor, meta=meta, select=select)
self.cpu_list = hilbert_cpu_list(meta=meta,
- scaling=scaling,
+ scaling=config.get_unit(
+ "z", meta["unit_d"], meta["unit_l"],
+ meta["unit_t"], meta["scale"]),
select=select,
infofile=meta["infofile"])
self.initialized = True
@@ -191,15 +149,19 @@ def read_variables(self, ncache, ind, ilevel, cpuid, info):
content=self.bytes,
offsets=self.offsets)
- self.variables["level"]["buffer"]._array[:ncache, ind] = ilevel + 1
+ if self.variables["level"]["read"]:
+ self.variables["level"]["buffer"]._array[:ncache, ind] = ilevel + 1
for n in range(info["ndim"]):
key = "xyz_" + "xyz"[n]
- self.variables[key]["buffer"]._array[:ncache, ind] = (
- self.xg[:ncache, n] + self.xcent[ind, n] - self.meta["xbound"][n]
- ) * info["boxlen"] * self.variables[key]["unit"].magnitude
- self.variables["dx"]["buffer"]._array[:ncache, ind] = self.dxcell * info[
- "boxlen"] * self.variables["dx"]["unit"].magnitude
- self.variables["cpu"]["buffer"]._array[:ncache, ind] = cpuid + 1
+ if self.variables[key]["read"]:
+ self.variables[key]["buffer"]._array[:ncache, ind] = (
+ self.xg[:ncache, n] + self.xcent[ind, n] - self.meta["xbound"][n]
+ ) * info["boxlen"] * self.variables[key]["unit"].magnitude
+ if self.variables["dx"]["read"]:
+ self.variables["dx"]["buffer"]._array[:ncache, ind] = self.dxcell * info[
+ "boxlen"] * self.variables["dx"]["unit"].magnitude
+ if self.variables["cpu"]["read"]:
+ self.variables["cpu"]["buffer"]._array[:ncache, ind] = cpuid + 1
# Note: use lmax here instead of levelmax because the user might not
# want to load all levels. levelmax is always the max level in the
diff --git a/src/osyris/io/grav.py b/src/osyris/io/grav.py
index 70983423..937d95a1 100644
--- a/src/osyris/io/grav.py
+++ b/src/osyris/io/grav.py
@@ -2,7 +2,6 @@
# Copyright (c) 2022 Osyris contributors (https://github.com/nvaytet/osyris)
import os
from .reader import Reader, ReaderKind
-from .. import config
from . import utils
@@ -11,6 +10,10 @@ def __init__(self):
super().__init__(kind=ReaderKind.AMR)
def initialize(self, meta, select):
+ self.initialized = False
+ if select is False:
+ return
+
fname = utils.generate_fname(meta["nout"], meta["path"], ftype="grav", cpuid=1)
# Check if self-gravity files exist
if not os.path.exists(fname):
@@ -19,22 +22,8 @@ def initialize(self, meta, select):
descriptor = {"potential": "d"}
for n in range(meta["ndim"]):
descriptor["acceleration_" + "xyz"[n]] = "d"
- # Now add to the list of variables to be read
- for key in descriptor:
- read = True
- if isinstance(select, bool):
- read = select
- elif key in select:
- if isinstance(select[key], bool):
- read = select[key]
- self.variables[key] = {
- "read": read,
- "type": descriptor[key],
- "buffer": None,
- "pieces": {},
- "unit": config.get_unit(key, meta["unit_d"], meta["unit_l"],
- meta["unit_t"])
- }
+
+ self.descriptor_to_variables(descriptor=descriptor, meta=meta, select=select)
self.initialized = True
def read_header(self, info):
diff --git a/src/osyris/io/hilbert.py b/src/osyris/io/hilbert.py
index bdb5e53b..f7da1430 100644
--- a/src/osyris/io/hilbert.py
+++ b/src/osyris/io/hilbert.py
@@ -162,6 +162,8 @@ def _get_cpu_list(bounding_box, lmax, levelmax, infofile, ncpu, ndim):
def hilbert_cpu_list(meta, scaling, select, infofile):
if meta["ordering type"] != "hilbert":
return
+ if isinstance(select, bool):
+ return
bounding_box = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1, "zmin": 0, "zmax": 1}
# Make an array of cell centers according to lmax
box_size = (meta["boxlen"] * scaling).magnitude
diff --git a/src/osyris/io/hydro.py b/src/osyris/io/hydro.py
index 50fbedcb..ee57fdd4 100644
--- a/src/osyris/io/hydro.py
+++ b/src/osyris/io/hydro.py
@@ -3,7 +3,6 @@
import numpy as np
import os
from .reader import Reader, ReaderKind
-from .. import config
from . import utils
@@ -12,30 +11,24 @@ def __init__(self):
super().__init__(kind=ReaderKind.AMR)
def initialize(self, meta, select):
+ self.initialized = False
+ if select is False:
+ return
+
# Read the number of variables from the hydro_file_descriptor.txt
# and select the ones to be read if specified by user
fname = os.path.join(meta["infile"], "hydro_file_descriptor.txt")
try:
- descriptor = np.loadtxt(fname, dtype=str, delimiter=",")
+ desc_from_file = np.loadtxt(fname, dtype=str, delimiter=",")
except IOError:
return
- for i in range(len(descriptor)):
- key = descriptor[i, 1].strip()
- read = True
- if isinstance(select, bool):
- read = select
- elif key in select:
- if isinstance(select[key], bool):
- read = select[key]
- self.variables[key] = {
- "read": read,
- "type": descriptor[i, 2].strip(),
- "buffer": None,
- "pieces": {},
- "unit": config.get_unit(key, meta["unit_d"], meta["unit_l"],
- meta["unit_t"])
- }
+ descriptor = {
+ desc_from_file[i, 1].strip(): desc_from_file[i, 2].strip()
+ for i in range(len(desc_from_file))
+ }
+
+ self.descriptor_to_variables(descriptor=descriptor, meta=meta, select=select)
self.initialized = True
def read_header(self, info):
diff --git a/src/osyris/io/loader.py b/src/osyris/io/loader.py
index 1314deec..d2e4a2be 100644
--- a/src/osyris/io/loader.py
+++ b/src/osyris/io/loader.py
@@ -44,7 +44,7 @@ def load_metadata(self):
meta["nout"] = self.nout
meta["path"] = self.path
meta["time"] *= config.get_unit("time", meta["unit_d"], meta["unit_l"],
- meta["unit_t"])
+ meta["unit_t"], meta["scale"])
meta["ncells"] = 0
meta["nparticles"] = 0
return meta
@@ -54,33 +54,38 @@ def load(self, select=None, cpu_list=None, meta=None):
out = {}
groups = list(self.readers.keys())
- if select is None:
- select = {group: {} for group in self.readers}
- else:
+ _select = {group: {} for group in self.readers}
+ if isinstance(select, dict):
for key in select:
if key not in self.readers:
print("Warning: {} found in select is not a valid "
"Datagroup.".format(key))
- for group in self.readers:
- if group not in select:
- select[group] = {}
+ else:
+ _select[key] = select[key]
+ elif isinstance(select, str):
+ for key in _select:
+ if key != select:
+ _select[key] = False
+ elif isinstance(select, list) or isinstance(select, tuple):
+ for key in _select:
+ if key not in select:
+ _select[key] = False
# Take into account user specified lmax
meta["lmax"] = meta["levelmax"]
- if "amr" in select:
- if select["amr"]:
- if "level" in select["amr"]:
+ if "amr" in _select:
+ if _select["amr"]:
+ if "level" in _select["amr"]:
meta["lmax"] = utils.find_max_amr_level(levelmax=meta["levelmax"],
- select=select["amr"])
+ select=_select["amr"])
# Initialize readers
readers = {}
for group in groups:
- if not self.readers[group].initialized:
- first_load = self.readers[group].initialize(meta=meta,
- select=select[group])
- if first_load is not None:
- out[group] = first_load
+ loaded_on_init = self.readers[group].initialize(meta=meta,
+ select=_select[group])
+ if loaded_on_init is not None:
+ out[group] = loaded_on_init
if self.readers[group].initialized:
readers[group] = self.readers[group]
@@ -160,7 +165,7 @@ def load(self, select=None, cpu_list=None, meta=None):
conditions = {}
for group, reader in readers.items():
conditions.update(
- reader.make_conditions(select[group], ncache))
+ reader.make_conditions(_select[group], ncache))
# Combine all selection criteria together with AND
# operation by using a product on bools
sel = np.where(
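
A standalone sketch (not from the patch) of how `load` now normalises its `select` argument into a per-group dict; `normalise_select` and the hard-coded group list are illustrative only.

```python
def normalise_select(select, groups=("amr", "hydro", "grav", "rt", "part", "sink")):
    # Mirror the _select construction in Loader.load: a dict is passed through
    # per group, a string keeps a single group, a list/tuple keeps several.
    _select = {group: {} for group in groups}
    if isinstance(select, dict):
        for key in select:
            if key in _select:
                _select[key] = select[key]
            else:
                print("Warning: {} found in select is not a valid "
                      "Datagroup.".format(key))
    elif isinstance(select, str):
        for key in _select:
            if key != select:
                _select[key] = False
    elif isinstance(select, (list, tuple)):
        for key in _select:
            if key not in select:
                _select[key] = False
    return _select


print(normalise_select("sink"))            # all groups except sink are False
print(normalise_select(["amr", "hydro"]))  # only amr and hydro keep {}
```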
diff --git a/src/osyris/io/part.py b/src/osyris/io/part.py
index 415fca22..f1ce66ae 100644
--- a/src/osyris/io/part.py
+++ b/src/osyris/io/part.py
@@ -3,7 +3,6 @@
import numpy as np
import os
from .reader import Reader, ReaderKind
-from .. import config
from ..core import Array
from . import utils
@@ -13,46 +12,29 @@ def __init__(self):
super().__init__(kind=ReaderKind.PART)
def initialize(self, meta, select):
+ self.initialized = False
+ if select is False:
+ return
+
# Read the number of variables from the hydro_file_descriptor.txt
# and select the ones to be read if specified by user
fname = os.path.join(meta["infile"], "part_file_descriptor.txt")
try:
- descriptor = np.loadtxt(fname, dtype=str, delimiter=",")
+ desc_from_file = np.loadtxt(fname, dtype=str, delimiter=",")
except IOError:
return
- scaling = utils.get_spatial_scaling(meta["unit_d"], meta["unit_l"],
- meta["unit_t"], meta["scale"])
-
- part_units = {
- 'position_x': scaling,
- 'position_y': scaling,
- 'position_z': scaling
+ descriptor = {
+ desc_from_file[i, 1].strip(): desc_from_file[i, 2].strip()
+ for i in range(len(desc_from_file))
}
- for i in range(len(descriptor)):
- key = descriptor[i, 1].strip()
- read = True
- if isinstance(select, bool):
- read = select
- elif key in select:
- if isinstance(select[key], bool):
- read = select[key]
- self.variables[key] = {
- "read":
- read,
- "type":
- descriptor[i, 2].strip(),
- "buffer":
- None,
- "pieces": {},
- "unit":
- part_units[key] if key in part_units else config.get_unit(
- key, meta["unit_d"], meta["unit_l"], meta["unit_t"])
- }
+ self.descriptor_to_variables(descriptor=descriptor, meta=meta, select=select)
self.initialized = True
def read_header(self, info):
+ if not self.initialized:
+ return
self.offsets["i"] += 2
self.offsets["n"] += 2
[nparticles] = utils.read_binary_data(fmt="i",
diff --git a/src/osyris/io/reader.py b/src/osyris/io/reader.py
index 1fa9375f..bfa6cdd3 100644
--- a/src/osyris/io/reader.py
+++ b/src/osyris/io/reader.py
@@ -1,6 +1,7 @@
import numpy as np
from . import utils
from ..core import Array
+from .. import config
from enum import Enum
@@ -19,6 +20,35 @@ def __init__(self, kind=None):
self.initialized = False
self.kind = kind
+ def descriptor_to_variables(self, descriptor, meta, select):
+ drop_others = False
+ if isinstance(select, dict):
+ for key, value in select.items():
+ if value is True:
+ drop_others = True
+
+ for key in descriptor:
+ read = True
+ if isinstance(select, bool):
+ read = select
+ elif key in select:
+ if isinstance(select[key], bool):
+ read = select[key]
+ elif drop_others:
+ read = False
+ self.variables[key] = {
+ "read":
+ read,
+ "type":
+ descriptor[key],
+ "buffer":
+ None,
+ "pieces": {},
+ "unit":
+ config.get_unit(key, meta["unit_d"], meta["unit_l"], meta["unit_t"],
+ meta["scale"])
+ }
+
def allocate_buffers(self, ngridmax, twotondim):
for item in self.variables.values():
if item["read"]:
diff --git a/src/osyris/io/rt.py b/src/osyris/io/rt.py
index 0c314ae1..547b1ec5 100644
--- a/src/osyris/io/rt.py
+++ b/src/osyris/io/rt.py
@@ -3,7 +3,6 @@
import numpy as np
import os
from .reader import Reader, ReaderKind
-from .. import config
class RtReader(Reader):
@@ -11,30 +10,24 @@ def __init__(self):
super().__init__(kind=ReaderKind.AMR)
def initialize(self, meta, select):
+ self.initialized = False
+ if select is False:
+ return
+
# Read the number of variables from the rt_file_descriptor.txt
# and select the ones to be read if specified by user
fname = os.path.join(meta["infile"], "rt_file_descriptor.txt")
try:
- descriptor = np.loadtxt(fname, dtype=str, delimiter=",")
+ desc_from_file = np.loadtxt(fname, dtype=str, delimiter=",")
except IOError:
return
- for i in range(len(descriptor)):
- key = descriptor[i, 1].strip()
- read = True
- if isinstance(select, bool):
- read = select
- elif key in select:
- if isinstance(select[key], bool):
- read = select[key]
- self.variables[key] = {
- "read": read,
- "type": descriptor[i, 2].strip(),
- "buffer": None,
- "pieces": {},
- "unit": config.get_unit(key, meta["unit_d"], meta["unit_l"],
- meta["unit_t"])
- }
+ descriptor = {
+ desc_from_file[i, 1].strip(): desc_from_file[i, 2].strip()
+ for i in range(len(desc_from_file))
+ }
+
+ self.descriptor_to_variables(descriptor=descriptor, meta=meta, select=select)
self.initialized = True
def read_header(self, info):
diff --git a/src/osyris/io/sink.py b/src/osyris/io/sink.py
index 4adee63b..d4e8ed2f 100644
--- a/src/osyris/io/sink.py
+++ b/src/osyris/io/sink.py
@@ -15,9 +15,9 @@ def __init__(self):
self.initialized = False
def initialize(self, meta, select):
- sink = Datagroup()
if select is False:
- return sink
+ return
+ sink = Datagroup()
sink_file = utils.generate_fname(meta["nout"],
meta["path"],
ftype="sink",
diff --git a/src/osyris/io/utils.py b/src/osyris/io/utils.py
index 548b7f39..4187262d 100644
--- a/src/osyris/io/utils.py
+++ b/src/osyris/io/utils.py
@@ -6,8 +6,6 @@
import struct
import numpy as np
from ..core import Array
-from .. import config
-from .. import units
def generate_fname(nout, path="", ftype="", cpuid=1, ext=""):
@@ -142,17 +140,3 @@ def find_max_amr_level(levelmax, select):
func_test = select["level"](possible_levels)
inds = np.argwhere(func_test).ravel()
return possible_levels[inds.max()]
-
-
-def get_spatial_scaling(ud, ul, ut, scale):
- """
- Compute the scaling factor to convert between code units and requested spatial
- scale.
- """
- length_unit = config.get_unit("x", ud, ul, ut)
- if scale is not None:
- scale = units(scale)
- scaling = (length_unit.to(scale) / scale).magnitude * scale
- else:
- scaling = length_unit
- return scaling
diff --git a/src/osyris/plot/map.py b/src/osyris/plot/map.py
index af40f111..d1c4d3c4 100644
--- a/src/osyris/plot/map.py
+++ b/src/osyris/plot/map.py
@@ -226,6 +226,8 @@ def map(*layers,
ymax = (datay + datadx).max().values
zmin = (dataz - datadx).min().values
zmax = (dataz + datadx).max().values
+ dx = (xmax - xmin) * datadx.unit.units
+ dy = (ymax - ymin) * datadx.unit.units
scalar_layer = []
to_binning = [] # contains the variables in cells close to the plane
diff --git a/src/osyris/plot/plane.py b/src/osyris/plot/plane.py
index 3a1ab9c0..e9e1da4e 100644
--- a/src/osyris/plot/plane.py
+++ b/src/osyris/plot/plane.py
@@ -7,7 +7,8 @@
def plane(*args, **kwargs):
"""
- Old alias for think map, will be deprecated soon.
+    Old deprecated alias for a thin map; will be removed soon.
"""
- warnings.warn("The plane function will be deprecated soon, use map instead.")
+ warnings.warn("The plane function is deprecated and will be removed soon, "
+ "use map instead.")
return _map(*args, dz=None, **kwargs)
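
For reference (not part of the patch), the migration this deprecation implies, mirroring the docs examples above; the output number `8` and `path="data"` are placeholders.

```python
import numpy as np
import osyris

data = osyris.Dataset(8, scale="au", path="data").load()
center = data["amr"]["xyz"][np.argmax(data["hydro"]["density"]).values]

# Old call: still works for now, but emits the deprecation warning added above.
osyris.plane({"data": data["hydro"]["density"], "norm": "log"},
             dx=2000 * osyris.units("au"), origin=center, direction="z")

# Replacement: plane() simply forwards to map() with dz=None (a thin map).
osyris.map({"data": data["hydro"]["density"], "norm": "log"},
           dx=2000 * osyris.units("au"), origin=center, direction="z")
```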