
Commit 8c5dc46

mario authored and committed
corrected import from gadgets
1 parent b030179 commit 8c5dc46

21 files changed: +1757 -5 lines

code/python/build/lib/cni_tlbx/HGR.py: +435 lines (large diffs are not rendered by default)

code/python/build/lib/cni_tlbx/IRM.py: +313 lines
@@ -0,0 +1,313 @@
'''
-----------------------------------------------------------------------------
LICENSE

Copyright 2020 Mario Senden

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

import sys
import numpy as np
from scipy.stats import zscore
from scipy.fft import fft, ifft
from cni_toolbox.gadgets import two_gamma

class IRM:
    '''
    Input-referred model (IRM) mapping tool.

    irm = IRM(parameters) creates an instance of the IRM class.
    parameters is a dictionary with 5 required keys
      - f_sampling: sampling frequency (1/TR)
      - n_samples : number of samples (volumes)
      - n_rows    : number of rows (in-plane resolution)
      - n_cols    : number of columns (in-plane resolution)
      - n_slices  : number of slices

    optional inputs are
      - hrf       : either a column vector containing a single hemodynamic
                    response used for every voxel;
                    or a tensor with a unique hemodynamic response along
                    its columns for each voxel.
                    By default the canonical two-gamma hemodynamic response
                    function is generated internally based on the scan parameters.

    This class has the following functions

      - hrf = IRM.get_hrf()
      - stimulus = IRM.get_stimulus()
      - tc = IRM.get_timecourses()
      - IRM.set_hrf(hrf)
      - IRM.set_stimulus(stimulus)
      - IRM.create_timecourses()
      - results = IRM.mapping(data)

    Typical workflow:
    1. irm = IRM(params)
    2. irm.set_stimulus()
    3. irm.create_timecourses(FUN, xdata)
    4. results = irm.mapping(data)
    '''

    def __init__(self, parameters, hrf = None):
        self.f_sampling = parameters['f_sampling']
        self.p_sampling = 1 / self.f_sampling
        self.n_samples = parameters['n_samples']
        self.n_rows = parameters['n_rows']
        self.n_cols = parameters['n_cols']
        self.n_slices = parameters['n_slices']
        self.n_total = self.n_rows * self.n_cols * self.n_slices

        if hrf is not None:
            self.l_hrf = hrf.shape[0]
            if hrf.ndim > 1:
                hrf = np.reshape(hrf, (self.l_hrf, self.n_total))
                self.hrf_fft = fft(np.vstack((hrf,
                                              np.zeros((self.n_samples,
                                                        self.n_total)))),
                                   axis = 0)
            else:
                self.hrf_fft = fft(np.append(hrf,
                                             np.zeros(self.n_samples)),
                                   axis = 0)
        else:
            self.l_hrf = int(34 * self.f_sampling)
            timepoints = np.arange(0,
                                   self.p_sampling * (self.n_samples +
                                                      self.l_hrf) - 1,
                                   self.p_sampling)
            self.hrf_fft = fft(two_gamma(timepoints), axis = 0)


    def get_hrf(self):
        '''
        Returns
        -------
        hrf: floating point array
            hemodynamic response function(s) used by the class
        '''
        if self.hrf_fft.ndim > 1:
            hrf = np.squeeze(
                np.reshape(
                    ifft(self.hrf_fft, axis = 0)[0:self.l_hrf, :],
                    (self.l_hrf,
                     self.n_rows,
                     self.n_cols,
                     self.n_slices)))
        else:
            hrf = ifft(self.hrf_fft, axis = 0)[0:self.l_hrf]

        return np.abs(hrf)

    def get_stimulus(self):
        '''
        Returns
        -------
        floating point array (1D)
            stimulus used by the class
        '''

        return self.stimulus

    def get_timecourses(self):
        '''
        Returns
        -------
        floating point array (time-by-grid size)
            predicted timecourses
        '''

        return np.abs(ifft(self.tc_fft, axis = 0)[0:self.n_samples, :])

    def set_hrf(self, hrf):
        '''
        Parameters
        ----------
        hrf: floating point array
            hemodynamic response function
        '''
        self.l_hrf = hrf.shape[0]
        if hrf.ndim > 1:
            hrf = np.reshape(hrf, (self.l_hrf, self.n_total))
            self.hrf_fft = fft(np.vstack((hrf,
                                          np.zeros((self.n_samples,
                                                    self.n_total)))),
                               axis = 0)
        else:
            self.hrf_fft = fft(np.append(hrf,
                                         np.zeros(self.n_samples)),
                               axis = 0)

    def set_stimulus(self, stimulus):
        '''
        Parameters
        ----------
        stimulus: floating point array (1D)
            stimulus used by the class
        '''
        self.stimulus = stimulus

    def create_timecourses(self, FUN, xdata):
        '''
        creates predicted timecourses based on the stimulus protocol
        and a range of parameters for an input-referred model.

        Required inputs are
         - FUN: function handle
            defining the input-referred model
         - xdata: dictionary with p elements (p = number of parameters).
            Each element needs to contain a column vector of variable
            length with parameter values to be explored.
        '''
        print('\ncreating timecourses')

        self.xdata = xdata
        self.n_predictors = len(xdata)

        n_observations = np.zeros(self.n_predictors)
        for idx, key in enumerate(self.xdata):
            n_observations[idx] = np.size(self.xdata[key])
        self.n_points = np.prod(n_observations).astype(int)

        idx_all = np.arange(self.n_points)
        self.idx = np.zeros((self.n_points, self.n_predictors))
        for p in range(self.n_predictors):
            self.idx[:, p] = (idx_all // (np.prod(n_observations[p+1::]))) \
                % n_observations[p]
        self.idx = self.idx.astype(int)

        tc = np.zeros((self.n_samples + self.l_hrf,
                       self.n_points))

        x = np.zeros(self.n_predictors)

        for j in range(self.n_points):
            for p, key in enumerate(self.xdata):
                x[p] = self.xdata[key][self.idx[j, p]]
            tc[0:self.n_samples, j] = FUN(self.stimulus, x)
            i = int(j / self.n_points * 21)
            sys.stdout.write('\r')
            sys.stdout.write("[%-20s] %d%%"
                             % ('='*i, 5*i))

        self.tc_fft = fft(tc, axis = 0)

    def mapping(self, data, threshold = 100, mask = []):
        '''
        identifies the best fitting timecourse for each voxel and
        returns a dictionary with keys corresponding to the
        parameters specified in xdata plus a key 'corr_fit'
        storing the fitness of the solution.

        Required inputs are
         - data: floating point array
            empirically observed BOLD timecourses
            whose rows correspond to time (volumes).

        Optional inputs are
         - threshold: float
            minimum voxel intensity (default = 100.0)
         - mask: boolean array
            binary mask for selecting voxels (default = [])
        '''
        print('\nmapping model parameters')

        data = np.reshape(data.astype(float),
                          (self.n_samples,
                           self.n_total))

        mean_signal = np.mean(data, axis = 0)
        data = zscore(data, axis = 0)

        if np.size(mask) == 0:
            mask = mean_signal >= threshold

        mask = np.reshape(mask, self.n_total)
        voxel_index = np.where(mask)[0]
        n_voxels = voxel_index.size

        mag_d = np.sqrt(np.sum(data[:, mask]**2, axis = 0))

        results = {'corr_fit': np.zeros(self.n_total)}
        for key in self.xdata:
            results[key] = np.zeros(self.n_total)

        if self.hrf_fft.ndim == 1:
            tc = np.transpose(
                zscore(
                    np.abs(
                        ifft(self.tc_fft *
                             np.expand_dims(self.hrf_fft,
                                            axis = 1), axis = 0)), axis = 0))
            tc = tc[:, 0:self.n_samples]
            mag_tc = np.sqrt(np.sum(tc**2, axis = 1))

            for m in range(n_voxels):
                v = voxel_index[m]

                CS = np.matmul(tc, data[:, v]) / (mag_tc * mag_d[m])
                idx_remove = np.isinf(CS) | np.isnan(CS)
                CS[idx_remove] = 0

                results['corr_fit'][v] = np.max(CS)
                idx_best = np.argmax(CS)

                for pos, key in enumerate(self.xdata):
                    results[key][v] = self.xdata[key][self.idx[idx_best, pos]]

                i = int(m / n_voxels * 21)
                sys.stdout.write('\r')
                sys.stdout.write("[%-20s] %d%%"
                                 % ('='*i, 5*i))

        else:
            for m in range(n_voxels):
                v = voxel_index[m]

                tc = np.transpose(
                    zscore(
                        np.abs(
                            ifft(self.tc_fft *
                                 np.expand_dims(self.hrf_fft[:, v],
                                                axis = 1), axis = 0)), axis = 0))

                tc = tc[:, 0:self.n_samples]
                mag_tc = np.sqrt(np.sum(tc**2, axis = 1))

                CS = np.matmul(tc, data[:, v]) / (mag_tc * mag_d[m])
                idx_remove = np.isinf(CS) | np.isnan(CS)
                CS[idx_remove] = 0

                results['corr_fit'][v] = np.max(CS)
                idx_best = np.argmax(CS)

                for pos, key in enumerate(self.xdata):
                    results[key][v] = self.xdata[key][self.idx[idx_best, pos]]

                i = int(m / n_voxels * 21)
                sys.stdout.write('\r')
                sys.stdout.write("[%-20s] %d%%"
                                 % ('='*i, 5*i))

        for key in results:
            results[key] = np.squeeze(
                np.reshape(results[key],
                           (self.n_rows,
                            self.n_cols,
                            self.n_slices)))

        return results
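
For illustration, here is a minimal usage sketch following the workflow listed in the class docstring (construct, set stimulus, create timecourses, map). The scan parameters, the random stimulus and data, the Gaussian tuning function gaussian_tuning, and the import path cni_toolbox.IRM are assumptions made for this example only; they are not part of this commit.

import numpy as np
from cni_toolbox.IRM import IRM  # package path assumed; adjust to the local install

# hypothetical scan parameters (TR = 2 s, 300 volumes, 10 x 10 x 5 voxels)
parameters = {'f_sampling': 0.5,
              'n_samples': 300,
              'n_rows': 10,
              'n_cols': 10,
              'n_slices': 5}
irm = IRM(parameters)

# hypothetical 1D stimulus timecourse, one value per volume
stimulus = np.random.rand(parameters['n_samples'])
irm.set_stimulus(stimulus)

# FUN maps (stimulus, x) to a predicted neuronal timecourse;
# this Gaussian tuning curve is only an illustrative placeholder
def gaussian_tuning(stimulus, x):
    mu, sigma = x
    return np.exp(-(stimulus - mu)**2 / (2 * sigma**2))

# grid of parameter values to explore (one key per model parameter)
xdata = {'mu': np.linspace(0, 1, 20),
         'sigma': np.linspace(0.05, 0.5, 10)}
irm.create_timecourses(gaussian_tuning, xdata)

# random stand-in for a volumes-by-rows-by-cols-by-slices BOLD array
data = np.random.rand(parameters['n_samples'],
                      parameters['n_rows'],
                      parameters['n_cols'],
                      parameters['n_slices'])
results = irm.mapping(data, threshold=0.0)
# results['mu'], results['sigma'] and results['corr_fit'] are
# n_rows-by-n_cols-by-n_slices parameter and fit maps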
