Skip to content

Commit baae84f

Browse files
committed
Change internal data and mask to private properties
1 parent d0bcdad commit baae84f

2 files changed

Lines changed: 61 additions & 61 deletions

File tree

ccdproc/combiner.py

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -184,14 +184,14 @@ def __init__(self, ccd_iter, dtype=None, xp=None):
184184

185185
# set up the data array
186186
# new_shape = (len(ccd_list),) + default_shape
187-
self.data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype)
187+
self._data_arr = xp.array([ccd.data for ccd in ccd_list], dtype=dtype)
188188

189189
# populate self._data_arr_mask
190190
mask_list = [
191191
ccd.mask if ccd.mask is not None else xp.zeros(default_shape)
192192
for ccd in ccd_list
193193
]
194-
self.data_arr_mask = xp.array(mask_list, dtype=bool)
194+
self._data_arr_mask = xp.array(mask_list, dtype=bool)
195195

196196
# Must be after self._data_arr is defined because it checks the
197197
# length of the data array.
@@ -222,13 +222,13 @@ def weights(self, value):
222222
except TypeError as err:
223223
raise TypeError("weights must be an array.") from err
224224

225-
if value.shape != self.data_arr.shape:
225+
if value.shape != self._data_arr.shape:
226226
if value.ndim != 1:
227227
raise ValueError(
228228
"1D weights expected when shapes of the "
229229
"data and weights differ."
230230
)
231-
if value.shape[0] != self.data_arr.shape[0]:
231+
if value.shape[0] != self._data_arr.shape[0]:
232232
raise ValueError(
233233
"Length of weights not compatible with specified axis."
234234
)
@@ -258,9 +258,9 @@ def scaling(self, value):
258258
if value is None:
259259
self._scaling = value
260260
else:
261-
n_images = self.data_arr.shape[0]
261+
n_images = self._data_arr.shape[0]
262262
if callable(value):
263-
self._scaling = [value(self.data_arr[i]) for i in range(n_images)]
263+
self._scaling = [value(self._data_arr[i]) for i in range(n_images)]
264264
self._scaling = xp.array(self._scaling)
265265
else:
266266
try:
@@ -277,7 +277,7 @@ def scaling(self, value):
277277
)
278278
self._scaling = xp.array(value)
279279
# reshape so that broadcasting occurs properly
280-
for _ in range(len(self.data_arr.shape) - 1):
280+
for _ in range(len(self._data_arr.shape) - 1):
281281
self._scaling = self.scaling[:, xp.newaxis]
282282

283283
# set up IRAF-like minmax clipping
@@ -329,12 +329,12 @@ def clip_extrema(self, nlow=0, nhigh=0):
329329
if nhigh is None:
330330
nhigh = 0
331331

332-
argsorted = xp.argsort(self.data_arr, axis=0)
332+
argsorted = xp.argsort(self._data_arr, axis=0)
333333
# Not every array package has mgrid, so make it in numpy and convert it to the
334334
# array package used for the data.
335335
mg = xp.asarray(
336336
np_mgrid[
337-
[slice(ndim) for i, ndim in enumerate(self.data_arr.shape) if i > 0]
337+
[slice(ndim) for i, ndim in enumerate(self._data_arr.shape) if i > 0]
338338
]
339339
)
340340
for i in range(-1 * nhigh, nlow):
@@ -344,10 +344,10 @@ def clip_extrema(self, nlow=0, nhigh=0):
344344
# dimensions, so we need to flatten the mask array, set the mask
345345
# values for a flattened array, and then reshape it back to the
346346
# original shape.
347-
flat_index = np_ravel_multi_index(where, self.data_arr.shape)
348-
self.data_arr_mask = xp.reshape(
349-
xpx.at(xp.reshape(self.data_arr_mask, (-1,)))[flat_index].set(True),
350-
self.data_arr.shape,
347+
flat_index = np_ravel_multi_index(where, self._data_arr.shape)
348+
self._data_arr_mask = xp.reshape(
349+
xpx.at(xp.reshape(self._data_arr_mask, (-1,)))[flat_index].set(True),
350+
self._data_arr.shape,
351351
)
352352

353353
# set up min/max clipping algorithms
@@ -365,13 +365,13 @@ def minmax_clipping(self, min_clip=None, max_clip=None):
365365
Default is ``None``.
366366
"""
367367
if min_clip is not None:
368-
mask = self.data_arr < min_clip
368+
mask = self._data_arr < min_clip
369369
# Written to avoid in-place modification of array
370-
self.data_arr_mask = self.data_arr_mask | mask
370+
self._data_arr_mask = self._data_arr_mask | mask
371371
if max_clip is not None:
372-
mask = self.data_arr > max_clip
372+
mask = self._data_arr > max_clip
373373
# Written to avoid in-place modification of array
374-
self.data_arr_mask = self.data_arr_mask | mask
374+
self._data_arr_mask = self._data_arr_mask | mask
375375

376376
# set up sigma clipping algorithms
377377
@deprecated_renamed_argument(
@@ -430,10 +430,10 @@ def sigma_clipping(
430430
# Remove in 3.0
431431
_ = kwd.pop("use_astropy", True)
432432

433-
self.data_arr_mask = (
434-
self.data_arr_mask
433+
self._data_arr_mask = (
434+
self._data_arr_mask
435435
| sigma_clip(
436-
self.data_arr,
436+
self._data_arr,
437437
sigma_lower=low_thresh,
438438
sigma_upper=high_thresh,
439439
axis=kwd.get("axis", 0),
@@ -448,18 +448,18 @@ def sigma_clipping(
448448

449449
def _get_scaled_data(self, scale_arg):
450450
if scale_arg is not None:
451-
return self.data_arr * scale_arg
451+
return self._data_arr * scale_arg
452452
if self.scaling is not None:
453-
return self.data_arr * self.scaling
454-
return self.data_arr
453+
return self._data_arr * self.scaling
454+
return self._data_arr
455455

456456
def _get_nan_substituted_data(self, data):
457457
xp = self._xp
458458

459459
# Get the data as an unmasked array with masked values filled as NaN
460-
if self.data_arr_mask.any():
460+
if self._data_arr_mask.any():
461461
# Use array_api_extra so that we can use at with all array libraries
462-
data = xpx.at(data)[self.data_arr_mask].set(xp.nan)
462+
data = xpx.at(data)[self._data_arr_mask].set(xp.nan)
463463
else:
464464
data = data
465465
return data
@@ -478,7 +478,7 @@ def _combination_setup(self, user_func, default_func, scale_to):
478478
data = self._get_nan_substituted_data(data)
479479
masked_values = xp.isnan(data).sum(axis=0)
480480
else:
481-
masked_values = self.data_arr_mask.sum(axis=0)
481+
masked_values = self._data_arr_mask.sum(axis=0)
482482
combo_func = user_func
483483

484484
return data, masked_values, combo_func
@@ -532,7 +532,7 @@ def median_combine(
532532
medianed = median_func(data, axis=0)
533533

534534
# set the mask
535-
mask = masked_values == len(self.data_arr)
535+
mask = masked_values == len(self._data_arr)
536536

537537
# set the uncertainty
538538

@@ -549,7 +549,7 @@ def median_combine(
549549
# be an array of the same class as the data, so make sure it is
550550
uncertainty = xp.asarray(uncertainty)
551551
# Divide uncertainty by the number of pixel (#309)
552-
uncertainty /= xp.sqrt(len(self.data_arr) - masked_values)
552+
uncertainty /= xp.sqrt(len(self._data_arr) - masked_values)
553553
# Convert uncertainty to plain numpy array (#351)
554554
# There is no need to care about potential masks because the
555555
# uncertainty was calculated based on the data so potential masked
@@ -566,7 +566,7 @@ def median_combine(
566566
)
567567

568568
# update the meta data
569-
combined_image.meta["NCOMBINE"] = len(self.data_arr)
569+
combined_image.meta["NCOMBINE"] = len(self._data_arr)
570570

571571
# return the combined image
572572
return combined_image
@@ -653,7 +653,7 @@ def average_combine(
653653

654654
# calculate the mask
655655

656-
mask = masked_values == len(self.data_arr)
656+
mask = masked_values == len(self._data_arr)
657657

658658
# set up the deviation
659659
uncertainty = uncertainty_func(data, axis=0)
@@ -729,7 +729,7 @@ def sum_combine(
729729
summed = sum_func(data, axis=0)
730730

731731
# set up the mask
732-
mask = masked_values == len(self.data_arr)
732+
mask = masked_values == len(self._data_arr)
733733

734734
# set up the deviation
735735
uncertainty = uncertainty_func(data, axis=0)
@@ -749,7 +749,7 @@ def sum_combine(
749749
)
750750

751751
# update the meta data
752-
combined_image.meta["NCOMBINE"] = len(self.data_arr)
752+
combined_image.meta["NCOMBINE"] = len(self._data_arr)
753753

754754
# return the combined image
755755
return combined_image

ccdproc/tests/test_combiner.py

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -85,16 +85,16 @@ def test_combiner_create():
8585
ccd_data = ccd_data_func()
8686
ccd_list = [ccd_data, ccd_data, ccd_data]
8787
c = Combiner(ccd_list)
88-
assert c.data_arr.shape == (3, 100, 100)
89-
assert c.data_arr_mask.shape == (3, 100, 100)
88+
assert c._data_arr.shape == (3, 100, 100)
89+
assert c._data_arr_mask.shape == (3, 100, 100)
9090

9191

9292
# test if dtype matches the value that is passed
9393
def test_combiner_dtype():
9494
ccd_data = ccd_data_func()
9595
ccd_list = [ccd_data, ccd_data, ccd_data]
9696
c = Combiner(ccd_list, dtype=xp.float32)
97-
assert c.data_arr.dtype == xp.float32
97+
assert c._data_arr.dtype == xp.float32
9898
avg = c.average_combine()
9999
# dtype of average should match input dtype
100100
assert avg.dtype == c.dtype
@@ -114,9 +114,9 @@ def test_combiner_mask():
114114
ccd = CCDData(data, unit=u.adu, mask=mask)
115115
ccd_list = [ccd, ccd, ccd]
116116
c = Combiner(ccd_list)
117-
assert c.data_arr.shape == (3, 10, 10)
118-
assert c.data_arr_mask.shape == (3, 10, 10)
119-
assert not c.data_arr_mask[0, 5, 5]
117+
assert c._data_arr.shape == (3, 10, 10)
118+
assert c._data_arr_mask.shape == (3, 10, 10)
119+
assert not c._data_arr_mask[0, 5, 5]
120120

121121

122122
def test_weights():
@@ -158,7 +158,7 @@ def test_pixelwise_weights():
158158
CCDData(xp.zeros((10, 10)) + 1000, unit=u.adu),
159159
]
160160
combo = Combiner(ccd_list)
161-
combo.weights = xp.ones_like(combo.data_arr)
161+
combo.weights = xp.ones_like(combo._data_arr)
162162
combo.weights = xpx.at(combo.weights)[:, 5, 5].set(xp.array([1, 5, 10]))
163163
ccd = combo.average_combine()
164164
np_testing.assert_allclose(ccd.data[5, 5], 312.5)
@@ -188,7 +188,7 @@ def test_combiner_minmax_max():
188188

189189
c = Combiner(ccd_list)
190190
c.minmax_clipping(min_clip=None, max_clip=500)
191-
assert c.data_arr_mask[2].all()
191+
assert c._data_arr_mask[2].all()
192192

193193

194194
def test_combiner_minmax_min():
@@ -200,7 +200,7 @@ def test_combiner_minmax_min():
200200

201201
c = Combiner(ccd_list)
202202
c.minmax_clipping(min_clip=-500, max_clip=None)
203-
assert c.data_arr_mask[1].all()
203+
assert c._data_arr_mask[1].all()
204204

205205

206206
def test_combiner_sigmaclip_high():
@@ -216,7 +216,7 @@ def test_combiner_sigmaclip_high():
216216
c = Combiner(ccd_list)
217217
# using mad for more robust statistics vs. std
218218
c.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad)
219-
assert c.data_arr_mask[5].all()
219+
assert c._data_arr_mask[5].all()
220220

221221

222222
def test_combiner_sigmaclip_single_pix():
@@ -231,13 +231,13 @@ def test_combiner_sigmaclip_single_pix():
231231
combo = Combiner(ccd_list)
232232
# add a single pixel in another array to check that
233233
# that one gets rejected
234-
combo.data_arr = xpx.at(combo.data_arr)[0, 5, 5].set(0)
235-
combo.data_arr = xpx.at(combo.data_arr)[1, 5, 5].set(-5)
236-
combo.data_arr = xpx.at(combo.data_arr)[2, 5, 5].set(5)
237-
combo.data_arr = xpx.at(combo.data_arr)[3, 5, 5].set(-5)
238-
combo.data_arr = xpx.at(combo.data_arr)[4, 5, 5].set(25)
234+
combo._data_arr = xpx.at(combo._data_arr)[0, 5, 5].set(0)
235+
combo._data_arr = xpx.at(combo._data_arr)[1, 5, 5].set(-5)
236+
combo._data_arr = xpx.at(combo._data_arr)[2, 5, 5].set(5)
237+
combo._data_arr = xpx.at(combo._data_arr)[3, 5, 5].set(-5)
238+
combo._data_arr = xpx.at(combo._data_arr)[4, 5, 5].set(25)
239239
combo.sigma_clipping(high_thresh=3, low_thresh=None, func="median", dev_func=mad)
240-
assert combo.data_arr_mask[4, 5, 5]
240+
assert combo._data_arr_mask[4, 5, 5]
241241

242242

243243
def test_combiner_sigmaclip_low():
@@ -253,7 +253,7 @@ def test_combiner_sigmaclip_low():
253253
c = Combiner(ccd_list)
254254
# using mad for more robust statistics vs. std
255255
c.sigma_clipping(high_thresh=None, low_thresh=3, func="median", dev_func=mad)
256-
assert c.data_arr_mask[5].all()
256+
assert c._data_arr_mask[5].all()
257257

258258

259259
# test that the median combination works and returns a ccddata object
@@ -377,7 +377,7 @@ def test_combiner_with_scaling():
377377
np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data))
378378

379379
# Set the scaling manually...
380-
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
380+
combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)]
381381
avg_ccd = combiner.average_combine()
382382
np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean())
383383
assert avg_ccd.shape == ccd_data.shape
@@ -586,7 +586,7 @@ def test_average_combine_uncertainty():
586586
ccd_list = [ccd_data, ccd_data, ccd_data]
587587
c = Combiner(ccd_list)
588588
ccd = c.average_combine(uncertainty_func=xp.sum)
589-
uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3)
589+
uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3)
590590
np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref)
591591

592592
# Compare this also to the "combine" call
@@ -601,7 +601,7 @@ def test_median_combine_uncertainty():
601601
ccd_list = [ccd_data, ccd_data, ccd_data]
602602
c = Combiner(ccd_list)
603603
ccd = c.median_combine(uncertainty_func=xp.sum)
604-
uncert_ref = xp.sum(c.data_arr, 0) / xp.sqrt(3)
604+
uncert_ref = xp.sum(c._data_arr, 0) / xp.sqrt(3)
605605
np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref)
606606

607607
# Compare this also to the "combine" call
@@ -616,7 +616,7 @@ def test_sum_combine_uncertainty():
616616
ccd_list = [ccd_data, ccd_data, ccd_data]
617617
c = Combiner(ccd_list)
618618
ccd = c.sum_combine(uncertainty_func=xp.sum)
619-
uncert_ref = xp.sum(c.data_arr, 0) * xp.sqrt(3)
619+
uncert_ref = xp.sum(c._data_arr, 0) * xp.sqrt(3)
620620
np_testing.assert_allclose(ccd.uncertainty.array, uncert_ref)
621621

622622
# Compare this also to the "combine" call
@@ -798,8 +798,8 @@ def test_combiner_3d():
798798
ccd_list = [data1, data2, data3]
799799

800800
c = Combiner(ccd_list)
801-
assert c.data_arr.shape == (3, 5, 5, 5)
802-
assert c.data_arr_mask.shape == (3, 5, 5, 5)
801+
assert c._data_arr.shape == (3, 5, 5, 5)
802+
assert c._data_arr_mask.shape == (3, 5, 5, 5)
803803

804804
ccd = c.average_combine()
805805
assert ccd.shape == (5, 5, 5)
@@ -836,7 +836,7 @@ def test_3d_combiner_with_scaling():
836836
np_testing.assert_allclose(np_median(med_ccd), np_median(med_inp_data))
837837

838838
# Set the scaling manually...
839-
combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
839+
combiner.scaling = [scale_by_mean(combiner._data_arr[i]) for i in range(3)]
840840
avg_ccd = combiner.average_combine()
841841
np_testing.assert_allclose(avg_ccd.data.mean(), ccd_data.data.mean())
842842
assert avg_ccd.shape == ccd_data.shape
@@ -935,9 +935,9 @@ def test_clip_extrema_with_other_rejection():
935935
ccdlist[1].data = xpx.at(ccdlist[1].data)[2, 0].set(100.1)
936936
c = Combiner(ccdlist)
937937
# Reject ccdlist[1].data[1,2] by other means
938-
c.data_arr_mask = xpx.at(c.data_arr_mask)[1, 1, 2].set(True)
938+
c._data_arr_mask = xpx.at(c._data_arr_mask)[1, 1, 2].set(True)
939939
# Reject ccdlist[3].data[0,0] by other means
940-
c.data_arr_mask = xpx.at(c.data_arr_mask)[3, 0, 0].set(True)
940+
c._data_arr_mask = xpx.at(c._data_arr_mask)[3, 0, 0].set(True)
941941

942942
c.clip_extrema(nlow=1, nhigh=1)
943943
result = c.average_combine()
@@ -979,8 +979,8 @@ def create_gen():
979979
yield ccd_data
980980

981981
c = Combiner(create_gen())
982-
assert c.data_arr.shape == (3, 100, 100)
983-
assert c.data_arr_mask.shape == (3, 100, 100)
982+
assert c._data_arr.shape == (3, 100, 100)
983+
assert c._data_arr_mask.shape == (3, 100, 100)
984984

985985

986986
@pytest.mark.parametrize(
@@ -1057,7 +1057,7 @@ def sum_func(_, axis=axis):
10571057
return xp.sum(new_data, axis=axis)
10581058

10591059
expected_result = 3 * data
1060-
actual_result = c.sum_combine(sum_func=my_summer(c.data_arr, c.data_arr_mask))
1060+
actual_result = c.sum_combine(sum_func=my_summer(c._data_arr, c._data_arr_mask))
10611061
elif comb_func == "average_combine":
10621062
expected_result = data
10631063
actual_result = c.average_combine(scale_func=xp.mean)

0 commit comments

Comments
 (0)