Skip to content

Commit cc88ad3

Browse files
author
Philip Adenekan
committed
unit-test-and-ci-updates
1 parent a9bb879 commit cc88ad3

File tree

15 files changed

+437
-222
lines changed

15 files changed

+437
-222
lines changed

.editorconfig

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
# EditorConfig is awesome: https://EditorConfig.org
2+
3+
# top-most EditorConfig file
4+
root = true
5+
6+
# Unix-style newlines with a newline ending every file
7+
[*]
8+
end_of_line = lf
9+
insert_final_newline = true
10+
11+
# Matches multiple files with brace expansion notation
12+
# Set default charset
13+
[*.{js,py}]
14+
charset = utf-8
15+
16+
# 4 space indentation
17+
[*.py]
18+
indent_style = space
19+
indent_size = 4
20+
21+
# Tab indentation (no size specified)
22+
[Makefile]
23+
indent_style = tab

.github/workflows/pylint.yml

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
name: Pylint
2+
3+
on: [push]
4+
5+
jobs:
6+
build:
7+
runs-on: ubuntu-latest
8+
strategy:
9+
matrix:
10+
python-version: ["3.6"]
11+
steps:
12+
- uses: actions/checkout@v2
13+
- name: Set up Python ${{ matrix.python-version }}
14+
uses: actions/setup-python@v2
15+
with:
16+
python-version: ${{ matrix.python-version }}
17+
- name: Install dependencies
18+
run: |
19+
python -m pip install --upgrade pip
20+
pip install pylint
21+
- name: Analysing the code with pylint
22+
run: |
23+
pylint rec_to_binaries/

.github/workflows/unit_test.yml

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
name: Project Tests
2+
3+
on: [push]
4+
5+
jobs:
6+
build:
7+
runs-on: ubuntu-latest
8+
defaults:
9+
run:
10+
shell: bash -l {0}
11+
strategy:
12+
matrix:
13+
python-version: ["3.6"]
14+
steps:
15+
- uses: actions/checkout@v2
16+
- name: Set up Python ${{ matrix.python-version }}
17+
uses: actions/setup-python@v2
18+
with:
19+
python-version: ${{ matrix.python-version }}
20+
- uses: conda-incubator/setup-miniconda@v2
21+
with:
22+
miniconda-version: "latest"
23+
activate-environment: rec_to_binaries
24+
environment-file: environment.yml
25+
run: |
26+
pytest -vv

.pylintrc

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
#options - https://github.com/Qiskit/openqasm/blob/master/.pylintrc
2+
# https://pylint.pycqa.org/en/latest/technical_reference/features.html
3+
4+
5+
[MESSAGES CONTROL]
6+
disable=
7+
missing-docstring,
8+
too-few-public-methods,
9+
too-many-instance-attributes,
10+
line-too-long,
11+
too-many-arguments,
12+
logging-fstring-interpolation,
13+
consider-using-f-string, # consider removing in the future
14+
import-error, # consider removing after finding a better solution
15+
16+
17+
[TYPECHECK]
18+
ignored-modules = numpy, pandas, scipy.stats
19+
20+
21+
[BASIC]
22+
# Good variable names which should always be accepted, separated by a comma
23+
good-names=_, id
24+
25+
# Bad variable names which should always be refused, separated by a comma
26+
bad-names=foo,bar,baz,toto,tutu,tata
27+
28+
# Regular expression matching correct attribute names
29+
attr-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$
30+
31+
# Naming hint for attribute names
32+
attr-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$
33+
34+
# Regular expression matching correct argument names
35+
argument-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$
36+
37+
# Naming hint for argument names
38+
argument-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$
39+
40+
# Regular expression matching correct variable names
41+
variable-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$
42+
43+
# Naming hint for variable names
44+
variable-name-hint=[A-Za-z_][A-Za-z0-9_]{0,30}$

.vscode/settings.json

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"python.linting.pylintEnabled": true,
3+
"python.linting.enabled": true,
4+
"python.linting.flake8Enabled": false
5+
}

environment.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ dependencies:
1414
- pytest
1515
- pytest-cov
1616
- coveralls
17+
- Faker
1718
- pip
1819
- pip:
1920
- mountainlab-pytools

rec_to_binaries/adjust_timestamps.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -10,10 +10,10 @@
1010

1111
import numpy as np
1212
import pandas as pd
13+
from scipy.stats import linregress
1314
from rec_to_binaries.create_system_time import infer_systime
1415
from rec_to_binaries.read_binaries import (readTrodesExtractedDataFile,
1516
write_trodes_extracted_datafile)
16-
from scipy.stats import linregress
1717

1818
logger = getLogger(__name__)
1919

@@ -53,28 +53,28 @@ def _regress_timestamps(trodestime, systime):
5353
Unix time
5454
5555
"""
56-
NANOSECONDS_TO_SECONDS = 1E9
56+
nanoseconds_to_seconds = 1E9
5757

5858
# Convert
5959
systime_seconds = np.asarray(systime).astype(
60-
np.float64) / NANOSECONDS_TO_SECONDS
60+
np.float64) / nanoseconds_to_seconds
6161
trodestime_index = np.asarray(trodestime).astype(np.float64)
6262

63-
slope, intercept, r_value, p_value, std_err = linregress(
63+
slope, intercept = linregress(
6464
trodestime_index, systime_seconds)
6565
adjusted_timestamps = intercept + slope * trodestime_index
66-
return (adjusted_timestamps * NANOSECONDS_TO_SECONDS).astype(np.int64)
66+
return (adjusted_timestamps * nanoseconds_to_seconds).astype(np.int64)
6767

6868

69-
def _insert_new_data(data_file, df):
69+
def _insert_new_data(data_file, data_frame):
7070
"""
7171
Replaces the `data` in the extracted data file with a new one.
7272
7373
Parameters
7474
----------
7575
data_file : dict
7676
Original data file as read in by `readTrodesExtractedDataFile`
77-
df : pandas.DataFrame
77+
data_frame : pandas.DataFrame
7878
New data
7979
8080
Returns
@@ -84,7 +84,7 @@ def _insert_new_data(data_file, df):
8484
8585
"""
8686
new_data_file = data_file.copy()
87-
new_data_file['data'] = np.asarray(df.to_records(index=False))
87+
new_data_file['data'] = np.asarray(data_frame.to_records(index=False))
8888
new_data_file['fields'] = ''.join(
8989
[f'<{name} {dtype}>'
9090
for name, (dtype, _) in new_data_file['data'].dtype.fields.items()])
@@ -113,7 +113,7 @@ def fix_timestamp_lag(continuoustime_filename):
113113
data_file = readTrodesExtractedDataFile(continuoustime_filename)
114114

115115
if 'systime' not in data_file['data'].dtype.names:
116-
logger.warn("No `systime`. Inferring from `system_time_at_creation` timestamp"
116+
logger.warning("No `systime`. Inferring from `system_time_at_creation` timestamp"
117117
" as a function of the `clockrate` and `trodestime`")
118118
new_data = infer_systime(data_file)
119119
else:

rec_to_binaries/core.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ def extract_trodes_rec_file(data_dir,
3434
parallel_instances=1,
3535
use_day_config=True,
3636
trodes_version=None):
37+
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
3738
"""Extracting Trodes rec files.
3839
3940
Following the Frank Lab directory structure for raw ephys data, will
@@ -205,11 +206,11 @@ def extract_trodes_rec_file(data_dir,
205206
use_day_config=use_day_config)
206207

207208
if adjust_timestamps_for_mcu_lag:
208-
''''There is some jitter in the arrival times of packets from the MCU (as
209-
reflected in the sysclock records in the .rec file. If we assume that
210-
the Trodes clock is actually regular, and that any episodes of lag are
211-
fairly sporadic, we can recover the correspondence between trodestime
212-
and system (wall) time.'''
209+
# There is some jitter in the arrival times of packets from the MCU (as
210+
# reflected in the sysclock records in the .rec file. If we assume that
211+
# the Trodes clock is actually regular, and that any episodes of lag are
212+
# fairly sporadic, we can recover the correspondence between trodestime
213+
# and system (wall) time.
213214
preprocessing_dir = animal_info.get_preprocessing_dir()
214215
filenames = glob.glob(os.path.join(
215216
preprocessing_dir, '**', '*.continuoustime.dat'), recursive=True)
@@ -243,9 +244,7 @@ def convert_binaries_to_hdf5(data_dir, animal, out_dir=None, dates=None,
243244
convert_lfp=True,
244245
convert_pos=True,
245246
convert_spike=True):
246-
animal_info = td.TrodesAnimalInfo(
247-
data_dir, animal, out_dir=out_dir, dates=dates)
248-
"""Converting preprocessed binaries into HDF5 files.
247+
'''Converting preprocessed binaries into HDF5 files.
249248
250249
Assume that preprocessing has already been completed using (for example)
251250
extract_trodes_rec_file.
@@ -266,7 +265,9 @@ def convert_binaries_to_hdf5(data_dir, animal, out_dir=None, dates=None,
266265
convert_lfps : bool, optional
267266
convert_dio : bool, optional
268267
convert_mda : bool, optional
269-
"""
268+
'''
269+
animal_info = td.TrodesAnimalInfo(
270+
data_dir, animal, out_dir=out_dir, dates=dates)
270271

271272
importer = td.TrodesPreprocessingToAnalysis(animal_info)
272273

rec_to_binaries/create_system_time.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,14 +41,14 @@ def create_systime(clockrate, data, system_time_at_creation):
4141
millisecond
4242
4343
"""
44-
NANOSECONDS_TO_SECONDS = 1e9
44+
nanoseconds_to_seconds = 1e9
4545

4646
clockrate = int(clockrate)
4747
n_time = data.shape[0]
4848
system_time_at_creation = pd.to_datetime(
4949
int(system_time_at_creation), unit='ms').value
5050
end = (system_time_at_creation +
51-
int((n_time - 1) * NANOSECONDS_TO_SECONDS / clockrate))
51+
int((n_time - 1) * nanoseconds_to_seconds / clockrate))
5252

5353
systime = pd.date_range(
5454
start=system_time_at_creation,

rec_to_binaries/read_binaries.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import numpy as np
55

66

7-
def readTrodesExtractedDataFile(filename):
7+
def readTrodesExtractedDataFile(filename): # pylint: disable=invalid-name
88
'''Read extracted trodes binary.
99
1010
Parameters
@@ -20,24 +20,24 @@ def readTrodesExtractedDataFile(filename):
2020
# Check if first line is start of settings block
2121
if file.readline().decode().strip() != '<Start settings>':
2222
raise Exception("Settings format not supported")
23-
fieldsText = dict()
23+
fields_text = {}
2424
for line in file:
2525
# Read through block of settings
2626
line = line.decode().strip()
2727
# filling in fields dict
2828
if line != '<End settings>':
2929
settings_name, setting = line.split(': ')
30-
fieldsText[settings_name.lower()] = setting
30+
fields_text[settings_name.lower()] = setting
3131
# End of settings block, signal end of fields
3232
else:
3333
break
3434
# Reads rest of file at once, using dtype format generated by parse_dtype()
3535
try:
36-
fieldsText['data'] = np.fromfile(
37-
file, dtype=parse_dtype(fieldsText['fields']))
36+
fields_text['data'] = np.fromfile(
37+
file, dtype=parse_dtype(fields_text['fields']))
3838
except KeyError:
39-
fieldsText['data'] = np.fromfile(file)
40-
return fieldsText
39+
fields_text['data'] = np.fromfile(file)
40+
return fields_text
4141

4242

4343
def parse_dtype(fieldstr):
@@ -46,7 +46,7 @@ def parse_dtype(fieldstr):
4646
Returns: np.dtype
4747
'''
4848
# Returns np.dtype from field string
49-
sep = re.split('\s', re.sub(r"\>\<|\>|\<", ' ', fieldstr).strip())
49+
sep = re.split('\s', re.sub(r"\>\<|\>|\<", ' ', fieldstr).strip()) # pylint: disable=anomalous-backslash-in-string
5050
typearr = []
5151

5252
# Every two elements is fieldname followed by datatype
@@ -56,7 +56,7 @@ def parse_dtype(fieldstr):
5656
ftype = 'uint32'
5757
# Finds if a <num>* is included in datatype
5858
if '*' in sep[i + 1]:
59-
temptypes = re.split('\*', sep[i + 1])
59+
temptypes = re.split('\*', sep[i + 1]) # pylint: disable=anomalous-backslash-in-string
6060
# Results in the correct assignment, whether str is num*dtype or dtype*num
6161
ftype = temptypes[temptypes[0].isdigit()]
6262
repeats = int(temptypes[temptypes[1].isdigit()])

0 commit comments

Comments
 (0)