Merged
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: "v5.0.0"
+    rev: "v6.0.0"
     hooks:
       - id: check-added-large-files
       - id: check-case-conflict
@@ -16,7 +16,7 @@ repos:
       - id: trailing-whitespace

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.12.5"
+    rev: "v0.12.9"
     hooks:
       # id: ruff-check would go here if using both
       - id: ruff-format
1,759 changes: 879 additions & 880 deletions illustration_lightcurve.ipynb

Large diffs are not rendered by default.

27 changes: 13 additions & 14 deletions src/lightcurves/LC.py
@@ -2,14 +2,13 @@

 import logging
 import pickle
+from pathlib import Path

 import astropy
 import astropy.stats.bayesian_blocks as bblocks
 import numpy as np
 from matplotlib import pyplot as plt
 from matplotlib.axes import Axes  # for type hints only
-from pathlib import Path
-

 # https://docs.astropy.org/en/stable/api/astropy.stats.bayesian_blocks.html
 import lightcurves.HopFinder as hf
@@ -37,7 +36,8 @@ def load_lc(path: str | Path) -> LightCurve:
     with path.open("rb") as f:
         return pickle.load(f)

-'''
+
+'''
 this following is exactly like the previous one.. might not be necessary
 def load_lc_npy(path: str | Path) -> LightCurve:
     """
@@ -52,6 +52,7 @@ def load_lc_npy(path: str | Path) -> LightCurve:
     return pickle.load(f)
 '''

+
 def load_lc_csv(path: str) -> LightCurve:
     """
     Load a pickled LightCurve instance from a CSV file saved with `save_csv()`.
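For orientation, the pickle-based loader round-trips like this (a minimal sketch: `my_lc.pkl` is a hypothetical path, `lc` an existing `LightCurve` instance, and `load_lc` the function shown above):

```python
import pickle
from pathlib import Path

# `lc` stands in for an existing LightCurve instance (hypothetical).
with Path("my_lc.pkl").open("wb") as f:
    pickle.dump(lc, f)

lc_again = load_lc("my_lc.pkl")  # unpickles the LightCurve saved above
```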
@@ -153,8 +154,8 @@ def clean_data(
         ts_ = ts[nan_mask]
         ts_clean = ts_[time_unique_id]
         return (time_unique, flux_clean, flux_error_clean, ts_clean)
-    else:
-        return (time_unique, flux_clean, flux_error_clean, None)
+    return (time_unique, flux_clean, flux_error_clean, None)


 def get_gti_iis(
     time: np.ndarray, n_gaps: int, n_pick: int | None
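The dropped `else:` is the standard no-else-after-return cleanup (what ruff's RET505 rule flags). Reduced to a standalone sketch, the surrounding logic masks NaNs and then deduplicates time stamps (array names follow the diff context; the helper itself is hypothetical):

```python
import numpy as np

def clean_arrays(time, flux, flux_error):
    # Drop samples where flux or flux_error is NaN ...
    nan_mask = ~(np.isnan(flux) | np.isnan(flux_error))
    time_, flux_, err_ = time[nan_mask], flux[nan_mask], flux_error[nan_mask]
    # ... then keep the first sample for each duplicate time stamp.
    time_unique, time_unique_id = np.unique(time_, return_index=True)
    return time_unique, flux_[time_unique_id], err_[time_unique_id]
```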
@@ -316,7 +317,7 @@ def __init__(
             friendly_error = "Input arrays do not have same length"
             raise ValueError(friendly_error)
         if len(flux[np.isnan(flux)]) > 0 or len(flux_error[np.isnan(flux_error)]) > 0:
-            friendly_error = "flux or flux_error contain np.nan values"
+            friendly_error = "flux or flux_error contain np.nan values"
             raise TypeError(friendly_error)
         if len(time) != len(np.unique(time)):
             friendly_error = "time contains duplicate values"
@@ -422,7 +423,7 @@ def save_npy(self, path: str) -> None:
         Use `load_lc_npy()` to read this file.
         This does not update `LC.py`, it saves current object state.
         TBD: actually since this is an object it just saves a pickle that could be called .npy
-        the save npy business needs to be revisited I think it might be nonsense
+        the save npy business needs to be revisited I think it might be nonsense
         """
         path = Path(path)
         with path.open("wb") as pickle_file:
@@ -508,7 +509,7 @@ def plot_lc(
             axtop.set_xbound(ax.get_xbound())
             axtop.set_xlim(ax.get_xlim())
             format_labels = astropy.time.Time(
-                list(t for t in ax.get_xticks()), format=self.time_format
+                list(ax.get_xticks()), format=self.time_format
             )
             if new_time_format == "isot":
                 new_labels = [
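The change drops a redundant generator: `ax.get_xticks()` already returns an array that `astropy.time.Time` accepts (the ndarray could even be passed without the `list()` call). In isolation, assuming MJD as `self.time_format`:

```python
import astropy.time

ticks = [59000.0, 59010.0, 59020.0]            # stand-ins for ax.get_xticks()
labels = astropy.time.Time(ticks, format="mjd")
print(labels.isot[0])                          # '2020-05-31T00:00:00.000'
```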
@@ -784,9 +785,8 @@ def get_bblocks_above(
                 self.block_val > threshold, self.block_val, threshold
             )
-        except AttributeError:
-            raise AttributeError(
-                "Initialize Bayesian blocks with lc.get_bblocks() first!"
-            )
+        except AttributeError as err:
+            msg = "Initialize Bayesian blocks with lc.get_bblocks() first!"
+            raise AttributeError(msg) from err

         # Merge neighbouring threshold blocks and delete edges
         block_mask = np.ones(len(self.block_val), dtype=bool)
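The rewritten handler matches the idiom ruff enforces (EM101: no raw string literal inside `raise`; B904: re-raises inside `except` should chain with `from`); note that `from err` only works once the exception is bound with `except AttributeError as err:`. As a runnable sketch of the pattern:

```python
class BlocksDemo:
    """Toy stand-in: block_val exists only after get_bblocks() runs."""

    def get_bblocks(self):
        self.block_val = [1.0, 2.0, 0.5]

    def get_bblocks_above(self, threshold):
        try:
            vals = self.block_val
        except AttributeError as err:
            msg = "Initialize Bayesian blocks with lc.get_bblocks() first!"
            raise AttributeError(msg) from err
        return [v for v in vals if v > threshold]
```

Chaining keeps the original `AttributeError` in the traceback, so the root cause stays visible alongside the friendlier message.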
@@ -857,9 +857,8 @@ def plot_bblocks(
                 **kwargs,
             )
-        except AttributeError:
-            raise AttributeError(
-                "Initialize Bayesian blocks with .get_bblocks() first!"
-            )
+        except AttributeError as err:
+            msg = "Initialize Bayesian blocks with .get_bblocks() first!"
+            raise AttributeError(msg) from err

     # -------------------------------------------------------------------------
     def bb_i(self, t: float):
@@ -91,8 +91,8 @@ def clean(self, peaks, starts, ends, lc):
             logging.info("not variable enough, no peak found")
             return (None, None, None)
         if self.lc_edges == "neglect" and (len(starts) < 1 or len(ends) < 1):
-            logging.info("not variable enough, missing start or end")
-            return (None, None, None)
+            logging.info("not variable enough, missing start or end")
+            return (None, None, None)
         if self.lc_edges == "add":
             if len(starts) < 1:
                 starts = np.insert(starts, 0, lc.edges[0])
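For context, `lc_edges` decides what happens when a flare is truncated by the light curve's boundary: "neglect" discards the incomplete HOP (the early return above), while "add" patches the missing start or end with the outermost block edge. A sketch of the "add" branch (toy values; the `ends` counterpart using `lc.edges[-1]` is an assumption mirroring the `starts` line shown):

```python
import numpy as np

starts = np.array([])              # no rising edge found before the first peak
ends = np.array([4.2])
first_edge, last_edge = 0.0, 5.0   # stand-ins for lc.edges[0], lc.edges[-1]

if len(starts) < 1:
    starts = np.insert(starts, 0, first_edge)
if len(ends) < 1:
    ends = np.append(ends, last_edge)
print(starts, ends)                # [0.] [4.2]
```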
@@ -199,8 +199,8 @@ def find_peaks(self, lc):
         for i in range(1, len(diff)):
             # if previous rising; this falling
             if (diff[i - 1] > 0 and diff[i] < 0) and (lc.block_val[i] > lc.baseline):
-                # peak_time = middle of peak block
-                peaks.append(lc.edges[i] + (lc.edges[i + 1] - lc.edges[i]) / 2)
+                # peak_time = middle of peak block
+                peaks.append(lc.edges[i] + (lc.edges[i + 1] - lc.edges[i]) / 2)
         return peaks

     def find_start_end(self, lc):
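The re-indented pair is apparently another whitespace-only fix; the logic is untouched. What the loop computes, as a standalone sketch: a block is a peak when the block-to-block difference flips from rising to falling while the block sits above the baseline, and the reported peak time is the midpoint of that block (toy numbers below):

```python
import numpy as np

block_val = np.array([1.0, 3.0, 2.0, 4.0, 1.5])   # toy Bayesian-block values
edges = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])  # block edges (one more entry)
baseline = 1.8
diff = np.diff(block_val)

peaks = [
    edges[i] + (edges[i + 1] - edges[i]) / 2
    for i in range(1, len(diff))
    if diff[i - 1] > 0 and diff[i] < 0 and block_val[i] > baseline
]
print(peaks)  # [1.5, 3.5] -> blocks 1 and 3 are local maxima above baseline
```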