Skip to content

Commit 02eb5b0

Browse files
authored
Merge pull request #96 from NeurodataWithoutBorders/elision_update
Elision update
2 parents fc9064e + fc17513 commit 02eb5b0

File tree

3 files changed

+290
-216
lines changed

3 files changed

+290
-216
lines changed

+util/loadTrialAlignedTimeSeriesData.m

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
electrode = [];
2222
end
2323

24-
trials = nwb.intervals.get('trials');
24+
trials = nwb.intervals_trials;
2525

2626
times = trials.start_time.data.load;
2727

tutorials/ecephys.m

Lines changed: 63 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
%
44
% author: Ben Dichter
55
6-
% last edited: Dec 17, 2018
6+
% last edited: Jan 22, 2019
77

88
%% NWB file
99
% All contents get added to the NWB file, which is created with the
@@ -71,19 +71,21 @@
7171
end
7272
end
7373
%%
74-
% add the |DynamicTable| object to the NWB file using the name |'electrodes'| (not flexible)
74+
% add the |DynamicTable| object to the NWB file in
75+
% /general/extracellular_ephys/electrodes
7576

7677
tbl.Properties.Description = 'my description';
7778

7879
electrode_table = util.table2nwb(tbl);
79-
nwb.general_extracellular_ephys.set('electrodes', electrode_table);
80+
nwb.general_extracellular_ephys_electrodes = electrode_table;
8081

81-
%% LFP
82-
% In order to write LFP, you need to construct a region view of the electrode
83-
% table to link the signal to the electrodes that generated them. You must do
84-
% this even if the signal is from all of the electrodes. Here we will create
85-
% a reference that includes all electrodes. Then we will randomly generate a
86-
% signal 1000 timepoints long from 10 channels
82+
%% Multielectrode recording
83+
% In order to write a multielectrode recording, you need to construct a
84+
% region view of the electrode table to link the signal to the electrodes
85+
% that generated them. You must do this even if the signal is from all of
86+
% the electrodes. Here we will create a reference that includes all
87+
% electrodes. Then we will generate a signal 1000 timepoints long from 10
88+
% channels.
8789

8890
ov = types.untyped.ObjectView('/general/extracellular_ephys/electrodes');
8991

@@ -93,8 +95,10 @@
9395

9496
%%
9597
% once you have the |ElectrodeTableRegion| object, you can create an
96-
% ElectricalSeries object to hold your LFP data. Here is an example using
97-
% starting_time and rate.
98+
% |ElectricalSeries| object to hold your multielectrode data. An
99+
% |ElectricalSeries| is an example of a |TimeSeries| object. For all
100+
% |TimeSeries| objects, you have 2 options for storing time information.
101+
% The first is to use |starting_time| and |rate|:
98102

99103
% generate data for demonstration
100104
data = reshape(1:10000, 10, 1000);
@@ -108,13 +112,12 @@
108112

109113
nwb.acquisition.set('ECoG', electrical_series);
110114
%%
111-
% You can also specify time using timestamps. This is particularly useful if
112-
% the timestamps are not evenly sampled. In this case, the electrical series
115+
% You can also specify time using |timestamps|. This is particularly useful if
116+
% the sample times are not evenly sampled. In this case, the electrical series
113117
% constructor will look like this
114118

115119
electrical_series = types.core.ElectricalSeries(...
116120
'timestamps', (1:1000)/200, ...
117-
'starting_time_rate', 200., ... % Hz
118121
'data', data,...
119122
'electrodes', electrode_table_region,...
120123
'data_unit','V');
@@ -125,70 +128,67 @@
125128
trials = types.core.TimeIntervals( ...
126129
'colnames', {'correct','start_time','stop_time'},...
127130
'description', 'trial data and properties', ...
128-
'id', types.core.ElementIdentifiers('data', 1:3),...
131+
'id', types.core.ElementIdentifiers('data', 0:2),...
129132
'start_time', types.core.VectorData('data', [.1, 1.5, 2.5],...
130-
'description','hi'),...
133+
'description','start time of trial'),...
131134
'stop_time', types.core.VectorData('data', [1., 2., 3.],...
132-
'description','hi'),...
135+
'description','end of each trial'),...
133136
'correct', types.core.VectorData('data', [false,true,false],...
134137
'description','my description'));
135138

136-
nwb.intervals.set('trials', trials);
139+
nwb.intervals_trials = trials;
137140

138141
%%
139142
% |colnames| is flexible - it can store any column names and the entries can
140143
% be any data type, which allows you to store any information you need about
141-
% trials. The units table stores information about cells and is created with
142-
% an analogous workflow.
144+
% trials.
145+
146+
%% Spikes
147+
% Spikes are stored in the |units| table, which uses 3 arrays to store the
148+
% spike times of all the cells.
149+
150+
%%
151+
%
152+
% <<UnitTimes.png>>
153+
%
154+
%%
155+
% to add spike data to the units table
156+
157+
spike_times = [0.1, 0.21, 0.34, 0.36, 0.4, 0.43, 0.5, 0.61, 0.66, 0.69];
158+
unit_ids = [0, 0, 1, 1, 2, 2, 0, 0, 1, 1];
159+
160+
[spike_times_vector, spike_times_index] = util.create_spike_times(unit_ids, spike_times);
161+
nwb.units = types.core.Units('colnames', {'spike_times', 'spike_times_index'},...
162+
'description','units table',...
163+
'id', types.core.ElementIdentifiers('data', 0:length(spike_times_index.data) - 1));
164+
nwb.units.spike_times = spike_times_vector;
165+
nwb.units.spike_times_index = spike_times_index;
143166

144167
%% Processing Modules
145168
% Measurements go in |acquisition| and subject or session data goes in
146-
% |general|, but if you have the result of an analysis, e.g. spike times,
147-
% you need to store this in a processing module. Here we make a processing
148-
% module called "cellular"
149-
150-
cell_mod = types.core.ProcessingModule('description', 'a test module');
169+
% |general|, but if you have the intermediate processing results, you
170+
% should put them in a processing module.
151171

152-
%% Spikes
153-
% There are two different ways of storing spikes (aka action potentials),
154-
% |Clustering| and |UnitTimes|. |Clustering| is more straightforward, and is used to mark
155-
% measured threshold crossings that are spike-sorted into different clusters,
156-
% indicating that they are believed to come from different neurons. The
157-
% advantage of this structure is that it is easy to write data via a stream
158-
% and it is easy to query based on time window (since the timestamps are
159-
% ordered).
172+
ecephys_mod = types.core.ProcessingModule('description', 'contains clustering data');
160173

161-
spike_times = [0.1, 0.21, 0.34, 0.36, 0.4, 0.43, 0.5, 0.61, 0.66, 0.69];
162-
cluster_ids = [0, 0, 1, 1, 2, 2, 0, 0, 1, 1];
174+
%%
175+
% The |Clustering| data structure holds information about the spike-sorting
176+
% process.
163177

164178
clustering = types.core.Clustering( ...
165179
'description', 'my_description',...
166-
'peak_over_rms',[1,2,3],...
180+
'peak_over_rms', [1, 2, 3],...
167181
'times', spike_times, ...
168182
'num', cluster_ids);
169183

170-
cell_mod.nwbdatainterface.set('clustering',clustering);
171-
nwb.processing.set('cellular', cell_mod);
184+
cell_mod.nwbdatainterface.set('clustering', clustering);
172185

173186
%%
174-
% The other structure is within the |units| table, which is organized by cell instead of
175-
% by time. The advantage of |units| is that it is more
176-
% parallel-friendly. It is easier to split the computation of by cells are
177-
% read/write in parallel, distributing the cells across the cores of your
178-
% computation network.
179-
%%
180-
%
181-
% <<UnitTimes.png>>
182-
%
183-
%%
184-
185-
[spike_times_vector, spike_times_index] = util.create_spike_times(cluster_ids, spike_times);
186-
nwb.units = types.core.Units('colnames',{'spike_times','spike_times_index'},...
187-
'description','units table',...
188-
'id', types.core.ElementIdentifiers('data',1:length(spike_times_index.data)));
189-
nwb.units.spike_times = spike_times_vector;
190-
nwb.units.spike_times_index = spike_times_index;
187+
% I am going to call this processing module "ecephys." As a convention, I
188+
% use the names of the NWB core namespace modules as the names of my
189+
% processing modules, however this is not a rule and you may use any name.
191190

191+
nwb.processing.set('ecephys', ecephys_mod);
192192

193193
%% Writing the file
194194
% Once you have added all of the data types you want to a file, you can save
@@ -197,7 +197,7 @@
197197
nwbExport(nwb, 'ecephys_tutorial.nwb')
198198

199199
%% Reading the file
200-
% load an NWB file object into memory with
200+
% load an NWB file object with
201201

202202
nwb2 = nwbRead('ecephys_tutorial.nwb');
203203

@@ -217,9 +217,8 @@
217217
disp(data(1:10, 1:10));
218218

219219
%%
220-
% Loading all of the data is not a problem for this small
221-
% dataset, but it can be a problem when dealing with real data that can be
222-
% several GBs or even TBs per session. In these cases you can load a specific secion of
220+
% Loading all of the data can be a problem when dealing with real data that can be
221+
% several GBs or even TBs per session. In these cases you can load a specific section of
223222
% data. For instance, here is how you would load data starting at the index
224223
% (1,1) and read 10 rows and 20 columns of data
225224

@@ -243,13 +242,13 @@
243242
[trial_data, tt] = util.loadTrialAlignedTimeSeriesData(nwb2, ...
244243
timeseries, window, conditions);
245244

246-
% plot data from the first electrode for those two trials (it's just noise in this example)
245+
% plot data from the first electrode for those two trials
247246
plot(tt, squeeze(trial_data(:,1,:)))
248247
xlabel('time (seconds)')
249248
ylabel(['ECoG (' timeseries.data_unit ')'])
250249

251-
%% Reading UnitTimes (RegionViews)
252-
% |UnitTimes| uses RegionViews to indicate which spikes belong to which cell.
250+
%% Reading units (RegionViews)
251+
% The |units| table uses an index array to indicate which spikes belong to which cell.
253252
% The structure is split up into 3 datasets (see Spikes section):
254253
my_spike_times = nwb.units.spike_times;
255254
%%
@@ -264,9 +263,9 @@
264263

265264
%% External Links
266265
% NWB allows you to link to datasets within another file through HDF5
267-
% ExternalLinks. This is useful for separating out large datasets that are
266+
% |ExternalLink|s. This is useful for separating out large datasets that are
268267
% not always needed. It also allows you to store data once, and access it
269-
% across many NWB files, so it is useful for storing e.g. subject-related
268+
% across many NWB files, so it is useful for storing subject-related
270269
% data that is the same for all sessions. Here is an example of creating a
271270
% link from the Subject object from the |ecephys_tutorial.nwb| file we just
272271
% created in a new file.

0 commit comments

Comments
 (0)