Commit 04b64d7

style: clean commented print statements.
Fix comment typo. Remove commented code from 1 year ago.
1 parent d51b9a1 commit 04b64d7

nilmtk_contrib/disaggregate/afhmm_sac.py

Lines changed: 34 additions & 84 deletions
@@ -52,32 +52,22 @@ def partial_fit(self, train_main, train_appliances, **load_kwargs):
             train_app_tmp.append((app_name,df_list))
 
         train_appliances = train_app_tmp
-
-
         learnt_model = OrderedDict()
-
         means_vector = []
-
         one_hot_states_vector = []
-
         pi_s_vector = []
-
         transmat_vector = []
-
         states_vector = []
 
         train_main = train_main.values.flatten().reshape((-1,1))
-
+
         for appliance_name, power in train_appliances:
             #print (appliance_name)
             self.appliances.append(appliance_name)
-
             X = power.values.reshape((-1,1))
-
             learnt_model[appliance_name] = hmm.GaussianHMM(self.default_num_states, "full")
             # Fit
             learnt_model[appliance_name].fit(X)
-
             means = learnt_model[appliance_name].means_.flatten().reshape((-1,1))
             states = learnt_model[appliance_name].predict(X)
             transmat = learnt_model[appliance_name].transmat_
@@ -88,17 +78,16 @@ def partial_fit(self, train_main, train_appliances, **load_kwargs):
 
             for i in keys:
                 total+=counter[i]
-
-            pi = []
 
+            pi = []
             for i in keys:
                 pi.append(counter[i]/total)
-
+
             pi = np.array(pi)
 
             nb_classes = self.default_num_states
             targets = states.reshape(-1)
-
+
             means_vector.append(means)
             pi_s_vector.append(pi)
             transmat_vector.append(transmat.T)
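For context, a minimal standalone sketch of the per-appliance training step this hunk touches, assuming hmmlearn is installed; the toy power series and the num_states value are illustrative stand-ins, not values taken from the repository:

# Hedged sketch of the per-appliance HMM fit performed in partial_fit (toy data).
from collections import Counter
import numpy as np
from hmmlearn import hmm

num_states = 2                                     # stands in for self.default_num_states
power = np.concatenate([np.zeros(50), 90 + np.random.rand(50)])  # toy appliance readings
X = power.reshape((-1, 1))

model = hmm.GaussianHMM(num_states, "full")        # one HMM per appliance
model.fit(X)

means = model.means_.flatten().reshape((-1, 1))    # per-state mean power
states = model.predict(X)                          # most likely state sequence
transmat = model.transmat_                         # state transition matrix

# Empirical state-occupancy frequencies, used as the start distribution pi
counter = Counter(states.flatten())
total = sum(counter.values())
pi = np.array([counter[k] / total for k in counter])

The fitted means, the empirical pi and transmat.T are exactly the per-appliance quantities appended to means_vector, pi_s_vector and transmat_vector above.
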
@@ -110,46 +99,7 @@ def partial_fit(self, train_main, train_appliances, **load_kwargs):
         self.pi_s_vector = pi_s_vector
         self.means_vector = means_vector
         self.transmat_vector = transmat_vector
-
-        # print(transmat_vector)
-        # print (means_vector)
-        # print (states_vector)
-        # print (pi_s_vector)
         print ("Finished Training")
-        # print (self.signal_aggregates)
-        # print (np.log(transmat))
-        # print(pi)
-        # print (np.log(pi))
-        #print (np.sum(transmat_vector[0],axis=1))
-        #print (np.sum(transmat_vector[0],axis=0))
-        #print (states.shape)
-        #print (one_hot_targets.shape)
-
-        # one_hot_states_vector = np.array(one_hot_states_vector)
-
-        # # print (transmat_vector[0])
-        # # print (np.sum(transmat_vector[0],axis=0))
-        # # print (np.sum(transmat_vector[0],axis=1))
-        # appliance_variable_matrix = []
-
-        # #print (len(states_vector))
-        # #variable_matrix = np.zeros((len(appliance_states),self.default_num_states,self.default_num_states))
-
-        # for appliance_states in states_vector:
-
-        # variable_matrix = np.zeros((len(appliance_states),self.default_num_states,self.default_num_states))
-
-        # for i in range(1,len(appliance_states)):
-        # current_state = appliance_states[i]
-        # previous_state = appliance_states[i-1]
-        # variable_matrix[i,current_state, previous_state] = 1
-        # appliance_variable_matrix.append(variable_matrix)
-
-        # appliance_variable_matrix = np.array(appliance_variable_matrix)
-        # term_1_list = []
-
-        # term_2_list = []
-
 
     def disaggregate_thread(self, test_mains,index,d):
         means_vector = self.means_vector
@@ -168,57 +118,67 @@ def disaggregate_thread(self, test_mains,index,d):
                 sigma = (test_mains.flatten() - usage.flatten()).reshape((-1,1))
                 sigma = np.where(sigma<1,1,sigma)
             else:
-
                 if flag==0:
                     constraints = []
                     cvx_state_vectors = []
                     cvx_variable_matrices = []
                     delta = cvx.Variable(shape=(len(test_mains),1), name='delta_t')
 
                     for appliance_id in range(self.num_appliances):
-                        state_vector = cvx.Variable(shape=(len(test_mains), self.default_num_states), name='state_vec-%s'%(appliance_id))
+                        state_vector = cvx.Variable(
+                                shape=(len(test_mains),
+                                       self.default_num_states),
+                                name='state_vec-%s'%(appliance_id)
+                        )
                         cvx_state_vectors.append(state_vector)
                         # Enforcing that their values are ranged
                         constraints+=[cvx_state_vectors[appliance_id]>=0]
                         constraints+=[cvx_state_vectors[appliance_id]<=1]
                         # Enforcing that sum of states equals 1
                         for t in range(len(test_mains)): # 6c
                             constraints+=[cvx.sum(cvx_state_vectors[appliance_id][t])==1]
+
                         # Creating Variable matrices for every appliance
                         appliance_variable_matrix = []
                         for t in range(len(test_mains)):
-                            matrix = cvx.Variable(shape=(self.default_num_states, self.default_num_states), name='variable_matrix-%s-%d'%(appliance_id,t))
+                            matrix = cvx.Variable(
+                                    shape=(self.default_num_states, self.default_num_states),
+                                    name='variable_matrix-%s-%d'%(appliance_id,t)
+                            )
                             appliance_variable_matrix.append(matrix)
+
                         cvx_variable_matrices.append(appliance_variable_matrix)
                         # Enforcing that their values are ranged
                         for t in range(len(test_mains)):
                             constraints+=[cvx_variable_matrices[appliance_id][t]>=0]
                             constraints+=[cvx_variable_matrices[appliance_id][t]<=1]
+
                         # Constraint 6e
                         for t in range(0,len(test_mains)): # 6e
                             for i in range(self.default_num_states):
-                                constraints+=[cvx.sum(((cvx_variable_matrices[appliance_id][t]).T)[i]) == cvx_state_vectors[appliance_id][t][i]]
+                                constraints += [
+                                    cvx.sum(((cvx_variable_matrices[appliance_id][t]).T)[i]) == cvx_state_vectors[appliance_id][t][i]
+                                ]
+
                         # Constraint 6d
                         for t in range(1,len(test_mains)): # 6d
                             for i in range(self.default_num_states):
-                                constraints+=[cvx.sum(cvx_variable_matrices[appliance_id][t][i]) == cvx_state_vectors[appliance_id][t-1][i]]
-
+                                constraints += [
+                                    cvx.sum(cvx_variable_matrices[appliance_id][t][i]) == cvx_state_vectors[appliance_id][t-1][i]
+                                ]
 
                     for appliance_id in range(self.num_appliances):
                         appliance_usage = cvx_state_vectors[appliance_id]@means_vector[appliance_id]
                         total_appliance_usage = cvx.sum(appliance_usage)
-                        constraints+=[total_appliance_usage <= self.signal_aggregates[self.appliances[appliance_id]]]
-
+                        constraints += [
+                            total_appliance_usage <= self.signal_aggregates[self.appliances[appliance_id]]
+                        ]
 
                     # Second order cone constraints
-
                     total_observed_reading = np.zeros((test_mains.shape))
-                    #print (len(cvx_state_vectors))
                     for appliance_id in range(self.num_appliances):
-                        total_observed_reading+=cvx_state_vectors[appliance_id]@means_vector[appliance_id]
+                        total_observed_reading += cvx_state_vectors[appliance_id]@means_vector[appliance_id]
                     flag=1
-
-
                 term_1 = 0
                 term_2 = 0
 
@@ -234,21 +194,20 @@ def disaggregate_thread(self, test_mains,index,d):
                     # The expression involving start states
                     first_one_hot_states = one_hot_states[0]
                     term_2-= cvx.sum(cvx.multiply(first_one_hot_states,np.log(pi)))
-
+
                 flag = 1
 
                 expression = 0
                 term_3 = 0
                 term_4 = 0
-
                 for t in range(len(test_mains)):
-                    term_4+= .5 * ((test_mains[t][0] - total_observed_reading[t][0])**2 / (sigma[t]**2))
+                    term_4+= .5 * ((test_mains[t][0] - total_observed_reading[t][0])**2 / (sigma[t]**2))
                     term_3+= .5 * (np.log(sigma[t]**2))
+
                 expression = term_1 + term_2 + term_3 + term_4
                 expression = cvx.Minimize(expression)
                 u = time.time()
                 prob = cvx.Problem(expression, constraints)
-
                 prob.solve(solver=cvx.SCS,verbose=False, warm_start=True)
                 s_ = [i.value for i in cvx_state_vectors]
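For readers unfamiliar with the optimisation being reformatted in these hunks, here is a self-contained sketch of the same CVXPY pattern, reduced to a single appliance and a short window; T, K, means, sigma and test_mains are illustrative toy values, not taken from the repository:

# Hedged, single-appliance sketch of the CVXPY problem assembled above (toy values).
import numpy as np
import cvxpy as cvx

T, K = 5, 2                                      # time steps, HMM states
means = np.array([[0.0], [90.0]])                # per-state mean power, shape (K, 1)
sigma = np.ones((T, 1))                          # noise scale, refined by the alternating step
test_mains = np.array([[5.], [8.], [95.], [92.], [4.]])

state_vec = cvx.Variable(shape=(T, K), name='state_vec-0')
constraints = [state_vec >= 0, state_vec <= 1]
for t in range(T):                               # 6c: each row is a distribution over states
    constraints += [cvx.sum(state_vec[t]) == 1]

# Gaussian observation term (term_4 above): squared error between the mains and the
# reconstructed total usage, weighted by the noise variance.
total_observed_reading = state_vec @ means
term_4 = 0
for t in range(T):
    term_4 += .5 * ((test_mains[t][0] - total_observed_reading[t][0])**2 / (sigma[t]**2))

prob = cvx.Problem(cvx.Minimize(term_4), constraints)
prob.solve(solver=cvx.SCS, verbose=False)
print(state_vec.value @ means)                   # estimated appliance usage per time step

The real method additionally builds the per-time-step transition-matrix variables with constraints 6c-6e and the signal-aggregate cap shown above before handing everything to cvx.Problem.
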

@@ -260,22 +219,14 @@ def disaggregate_thread(self, test_mains,index,d):
 
         d[index] = pd.DataFrame(prediction_dict,dtype='float32')
 
-
-
-
-
-
-
     def disaggregate_chunk(self, test_mains_list):
-
-        # Sistributes the test mains across multiple threads and runs them in parallel
+        # Distributes the test mains across multiple threads and runs them in parallel
         manager = Manager()
         d = manager.dict()
-
         predictions_lst = []
-        for test_mains in test_mains_list:
+        for test_mains in test_mains_list:
             test_mains_big = test_mains.values.flatten().reshape((-1,1))
-            self.arr_of_results = []
+            self.arr_of_results = []
             st = time.time()
             threads = []
             for test_block in range(int(math.ceil(len(test_mains_big)/self.time_period))):
@@ -293,7 +244,6 @@ def disaggregate_chunk(self, test_mains_list):
                 self.arr_of_results.append(d[i])
             prediction = pd.concat(self.arr_of_results,axis=0)
             predictions_lst.append(prediction)
-
+
         return predictions_lst
 
-
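The parallel pattern behind disaggregate_chunk, sketched standalone below under the assumption that each per-block worker runs as a multiprocessing Process writing into a Manager dict; process_block, time_period and the toy signal are illustrative stand-ins, not the repository's values:

# Hedged sketch of the block-parallel layout used by disaggregate_chunk: the mains are
# cut into fixed-size blocks, one worker per block writes its result into a shared dict
# keyed by the block index, and the blocks are concatenated back in order.
import math
from multiprocessing import Manager, Process

import numpy as np
import pandas as pd


def process_block(block, index, d):
    # Placeholder for the per-block disaggregation (disaggregate_thread above).
    d[index] = pd.DataFrame({'appliance': block.flatten()}, dtype='float32')


if __name__ == '__main__':
    time_period = 720                         # block length; illustrative value
    test_mains_big = np.random.rand(2000, 1)  # toy aggregate signal

    manager = Manager()
    d = manager.dict()
    workers = []
    n_blocks = int(math.ceil(len(test_mains_big) / time_period))
    for i in range(n_blocks):
        block = test_mains_big[i * time_period:(i + 1) * time_period]
        p = Process(target=process_block, args=(block, i, d))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()

    prediction = pd.concat([d[i] for i in range(n_blocks)], axis=0)
    print(prediction.shape)
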
