Skip to content

Commit 490fb8d

Browse files
committed
cleanup comments in various funcs
1 parent e63cb23 commit 490fb8d

6 files changed

Lines changed: 12 additions & 16 deletions

File tree

pyhctsa/operations/correlation.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1719,7 +1719,7 @@ def _stat_av(y: ArrayLike, window_stat: str = 'mean', num_seg: int = 5, inc_move
17191719
logging.warning(f"Time-series of length {len(y)} is too short for {num_seg} windows")
17201720
return np.nan
17211721
inc = np.floor(win_length/inc_move) # increment to move at each step
1722-
# if incrment rounded down to zero, prop it up
1722+
# if increment rounded down to zero, prop it up
17231723
if inc == 0:
17241724
inc = 1
17251725

@@ -1829,7 +1829,7 @@ def autocorr_shape(y: ArrayLike, stop_when: Union[int, str] = 'pos_drown') -> di
18291829

18301830
# Check for good behavior
18311831
if np.any(np.isnan(acf)):
1832-
# This is an anomalous time series (e.g., all constant, or conatining NaNs)
1832+
# This is an anomalous time series (e.g., all constant, or containing NaNs)
18331833
out = np.nan
18341834

18351835
out = {}

pyhctsa/operations/information.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -403,7 +403,7 @@ def automutual_info(
403403
for k, delay in enumerate(time_delay):
404404
# check enough samples to compute automutual info
405405
if delay > n - min_samples:
406-
# time sereis too short - keep the remaining values as NaNs
406+
# time series too short - keep the remaining values as NaNs
407407
break
408408

409409
# form the time-delay vectors y1 and y2
@@ -679,7 +679,7 @@ def _rm_info(*args):
679679
return
680680

681681
# some initial tests on the input arguments
682-
x = np.array(args[0]) # make sure the imputs are in numpy array form
682+
x = np.array(args[0]) # make sure the inputs are in numpy array form
683683
y = np.array(args[1])
684684

685685
x_shape = x.shape
@@ -915,7 +915,7 @@ def _rm_histogram_2(*args):
915915
logging.warning("Invalid number of cells in Y dimension")
916916

917917
if upperx <= lowerx:
918-
logging.warning("Ivalid bounds in X dimension")
918+
logging.warning("Invalid bounds in X dimension")
919919

920920
if uppery <= lowery:
921921
logging.warning("Invalid bounds in Y dimension")

pyhctsa/operations/medical.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,6 @@ def pol_var(x: ArrayLike, d: float = 1, D: int = 6) -> float:
271271
i = 0
272272
pc = 0
273273

274-
# seqcnt = 0
275274
while i <= (N-D):
276275
x_seq = x_sym[i:(i+D)]
277276
if np.array_equal(x_seq, z_seq) or np.array_equal(x_seq, o_seq):

pyhctsa/operations/model_fit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -307,7 +307,7 @@ def local_simple(y: ArrayLike, forecast_meth: str = 'mean',
307307
Returns
308308
-------
309309
dict
310-
Dictionary containing output statistics on the residuals of the simple fortecasting method.
310+
Dictionary containing output statistics on the residuals of the simple forecasting method.
311311
312312
"""
313313
y = np.asarray(y)
@@ -316,7 +316,7 @@ def local_simple(y: ArrayLike, forecast_meth: str = 'mean',
316316
if train_length == 'ac':
317317
lp = first_crossing(y, 'ac', 0, 'discrete')
318318
else:
319-
#the e length of the subsegment preceeding to use to predict the subsequent value
319+
#the e length of the subsegment preceding to use to predict the subsequent value
320320
lp = train_length
321321
evalr = np.arange(lp, N) #range over which to evaluate the forecast
322322
if np.size(evalr) == 0:

pyhctsa/operations/stationarity.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -55,14 +55,11 @@ def local_distributions(y: ArrayLike, num_segs: int = 5, each_or_par: str = 'par
5555
start_idx = i * lseg
5656
end_idx = (i + 1) * lseg
5757
segment_data = y[start_idx:end_idx]
58-
#kde = KDEUnivariate(segment_data)
5958
kde = gaussian_kde(segment_data, bw_method="scott")
60-
#kde.fit(bw="scott") # tune bw adjustment factor empiricially?
6159
dns[:, i] = kde.evaluate(r)
6260
# Compare the local distributions
6361
if each_or_par in ["par", "parent"]:
6462
#Compares each subdistribution to the parent (full signal) distribution
65-
#kde = KDEUnivariate(y).fit(bw="scott")
6663
kde = gaussian_kde(y, bw_method="scott")
6764
pardn = kde.evaluate(r)
6865
divs = np.zeros(num_segs)
@@ -551,7 +548,7 @@ def range_evolve(y: ArrayLike) -> dict:
551548

552549
fullr = np.ptp(y)
553550

554-
# return number of unqiue entries in a vector, x
551+
# return number of unique entries in a vector, x
555552
lunique = lambda x : len(np.unique(x))
556553
out['totnuq'] = lunique(cums)
557554

@@ -1081,7 +1078,7 @@ def sliding_window(y: ArrayLike, window_stat: str = 'mean', across_win_stat: str
10811078
return out
10821079

10831080
def _get_window(step_ind, inc, win_length):
1084-
# helper funtion to convert a step index (stepInd) to a range of indices corresponding to that window
1081+
# helper function to convert a step index (stepInd) to a range of indices corresponding to that window
10851082
start_idx = (step_ind) * inc
10861083
end_idx = (step_ind) * inc + win_length
10871084

pyhctsa/operations/symbolic.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def surprise(y: ArrayLike, what_prior: str = 'dist', memory: float = 0.2, num_gr
8585
store[i] = p
8686
elif what_prior == 'T1':
8787
# uses one-point correlations in memory to inform the next point
88-
# estimate transition probabilites from data in memory
88+
# estimate transition probabilities from data in memory
8989
# find where in memory this has been observed before, and preceded it
9090
memory_data = yth[rs[0, i] - memory:rs[0, i]]
9191
inmem = np.where(memory_data[:-1] == yth[rs[0, i] - 1])[0]
@@ -124,7 +124,7 @@ def surprise(y: ArrayLike, what_prior: str = 'dist', memory: float = 0.2, num_gr
124124
out['min'] = np.nan
125125

126126
# Calculate statistics
127-
out['max'] = np.max(store) # maximum amount of information you cna gain in this way
127+
out['max'] = np.max(store) # maximum amount of information you can gain in this way
128128
out['mean'] = np.mean(store)
129129
out['sum'] = np.sum(store)
130130
out['median'] = np.median(store)
@@ -587,7 +587,7 @@ def transition_matrix(y: ArrayLike, how_to_cg: str = 'quantile',
587587
num_groups : int, optional
588588
number of groups in the coarse-graining. Default is 2.
589589
tau : int or str, optional
590-
analyze transition matricies corresponding to this lag. We
590+
analyze transition matrices corresponding to this lag. We
591591
could either downsample the time series at this lag and then do the
592592
discretization as normal, or do the discretization and then just
593593
look at this discrete lag. Here we do the former. Can also set tau to 'ac'

0 commit comments

Comments
 (0)