This repository was archived by the owner on May 27, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathpy3generatefeatureweights.py
126 lines (121 loc) · 4.89 KB
/
py3generatefeatureweights.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
#!/usr/bin/python
import numpy as np
import parsematlab
import loaddata
import swlda
from iwafgui import Error, Info, SaveAs
def exportToPRM(channels, weights, epoch_length):
    """Render SWLDA classifier output as BCI2000 .prm parameter text.

    Parameters:
        channels: 1-D numpy array of one-based transmitted channel numbers.
        weights: numpy array of shape (n, 4); each row is
            (input channel, input element/bin, output channel, weight).
        epoch_length: epoch length in milliseconds from stimulus onset.

    Returns the parameter file contents as a single string with CRLF
    line endings (four parameter lines: Classifier, EpochLength,
    SpatialFilter, TransmitChList).
    """
    # Sparse classifier rows: "channel element output weight" per entry.
    sparse_rows = ' '.join(
        '%i %i %i %f' % (int(row[0]), int(row[1]), int(row[2]), row[3])
        for row in weights
    )
    n_channels = channels.size
    # Identity spatial filter (no re-referencing), flattened row-major.
    identity = ' '.join('%f' % v for v in np.eye(n_channels).ravel())
    channel_list = ' '.join('%i' % c for c in channels)
    # Note: "%%20" survives %-formatting as "%20", BCI2000's space escape.
    template = (
        'Filtering:LinearClassifier matrix Classifier= %(lenweights)i {'
        ' input%%20channel input%%20element%%20(bin) output%%20channel 4 } '
        '%(weights)s // Linear classification matrix in sparse representati'
        'on\r\n'
        'Filtering:P3TemporalFilter int EpochLength= %(epochlen)ims // Leng'
        'th of data epoch from stimulus onset\r\n'
        'Filtering:SpatialFilter matrix SpatialFilter= %(lenchannels)i %(le'
        'nchannels)i %(eye)s // columns represent input channels, rows repr'
        'esent output channels\r\n'
        'Source:Online%%20Processing list TransmitChList= %(lenchannels)i %'
        '(channels)s // list of transmitted channels\r\n'
    )
    return template % {
        'lenweights': weights.shape[0],
        'weights': sparse_rows,
        'epochlen': epoch_length,
        'lenchannels': n_channels,
        'eye': identity,
        'channels': channel_list,
    }
def generateFeatureWeights(name, values):
args = values['generation-args'][1]
errors = []
for key in args:
if key in ('removeanomalies', 'classificationmethod'):
continue
label, value = args[key]
value = parsematlab.parse(value)
if isinstance(value, str):
errors.append(label + '\n ' + value.replace('\n', '\n '))
args[key] = value
if len(errors) > 0:
Error('\n\n'.join(errors))
return
response_window = args['responsewindow']
decimation_frequency = args['decimationfrequency']
max_model_features = args['maxmodelfeatures']
penter = args['penter']
premove = args['premove']
random_sample_percent = args['randompercent']
channelset = args['channelset'] - 1
fnames = values['flist'][1]
weightwidget = values['weightfile'][0]
removeanomalies = args['removeanomalies'][1]
classificationmethod = args['classificationmethod'][1]
data = []
type = []
samplingrate = None
channels = None
try:
for fname in fnames:
result = loaddata.load_data(fname, response_window, None,
removeanomalies = removeanomalies)
if isinstance(result, str):
Error(result)
return
if samplingrate == None:
samplingrate = result[2]
if samplingrate != result[2]:
Error('Not all data files have the same sampling rate.')
return
if channels == None:
channels = result[0].shape[2]
if channels != result[0].shape[2]:
Error('Not all data files have the same number of channels.')
return
try:
data.append(result[0][:, :, channelset])
except IndexError:
Error('"Channel Set" is not a subset of the available ' + \
'channels.')
return
type.append(result[1])
if len(data) == 0 or len(type) == 0:
Error('You must select some data from which to generate ' + \
'the weights.')
return
data = np.concatenate(data)
type = np.concatenate(type)
randomindices = np.arange(data.shape[0], dtype = int)
np.random.shuffle(randomindices)
randomindices = randomindices[:data.shape[0] * random_sample_percent // 100]
randomindices.sort()
data = data[randomindices]
type = type[randomindices]
result = swlda.swlda(data, type, samplingrate, response_window,
decimation_frequency, max_model_features, penter, premove)
if isinstance(result, str):
Error(result)
return
channels, weights = result
channels = channelset[channels - 1] + 1 # Convert from one-based for
# indexing, and then to one-based for human readability.
prm = exportToPRM(channels, weights, response_window[1])
try:
fname = SaveAs(filetypes = [('Parameter Files', '.prm')],
defaultextension = 'prm')
if fname:
prmfile = open(fname, 'wb')
prmfile.write(prm)
prmfile.close()
weightwidget.setContents(fname)
except:
Error('Could not write PRM file.')
return
except MemoryError:
Error('Could not fit all the selected data in memory.\n' + \
'Try loading fewer data files.')
return