Encapsulage.py
import numpy as np
from Linear import *


class Sequential:
    """Chains modules so the forward and backward passes run through the whole network."""

    def __init__(self):
        self.moduleList = []
        self.forwards = None   # outputs of each module from the last forward pass
        self.histLoss = []     # loss history, one entry per backward call

    def add_module(self, module):
        self.moduleList.append(module)

    def forward(self, datax):
        # Propagate the input through every module, keeping the intermediate
        # outputs so the backward pass can reuse them.
        self.forwards = [self.moduleList[0].forward(datax)]
        for i in range(1, len(self.moduleList)):
            self.forwards.append(self.moduleList[i].forward(self.forwards[-1]))
        return self.forwards[-1]

    def backward(self, X, Y, gradient_step=1e-3, fctsort=None, loss=MSE):
        # Compute the loss on the last forward pass, optionally after an output
        # function (fctsort), e.g. a softmax.
        loss_fn = loss()
        res = self.forwards[-1]
        if fctsort:
            res = fctsort(res)
        self.histLoss.append(sum(loss_fn.forward(Y, res)))
        # Back-propagate: walk the modules in reverse, accumulate each module's
        # gradient, compute the delta for the previous layer with the current
        # weights, then update the parameters.
        delta = loss_fn.backward(Y, res)
        for i in range(len(self.moduleList) - 1, 0, -1):
            self.moduleList[i].zero_grad()
            self.moduleList[i].backward_update_gradient(self.forwards[i - 1], delta)
            new_delta = self.moduleList[i].backward_delta(self.forwards[i - 1], delta)
            self.moduleList[i].update_parameters(gradient_step)
            delta = new_delta
        # The first module receives the original input X.
        self.moduleList[0].zero_grad()
        self.moduleList[0].backward_update_gradient(X, delta)
        self.moduleList[0].update_parameters(gradient_step)


class Optim:
    """Wraps a Sequential network and performs one optimisation step per batch."""

    def __init__(self, moduleList, loss=MSE, eps=1e-3, fctsort=None):
        self.moduleList = moduleList
        self.loss = loss
        self.eps = eps
        self.fctsort = fctsort

    def step(self, batch_x, batch_y):
        self.moduleList.forward(batch_x)
        self.moduleList.backward(batch_x, batch_y, gradient_step=self.eps,
                                 fctsort=self.fctsort, loss=self.loss)


def SGD(moduleList, X, Y, batch_size, loss, fctsort, maxiter):
    """Mini-batch stochastic gradient descent: sample a random batch and step on it."""
    rn = Optim(moduleList, loss=loss, fctsort=fctsort)
    for _ in range(maxiter):
        indx = np.random.choice(len(X), size=batch_size)
        batch_x = X[indx]
        batch_y = Y[indx]
        rn.step(batch_x, batch_y)
    return rn
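
A minimal usage sketch of how Sequential, Optim and SGD fit together. It assumes Linear.py exposes a Linear(input_dim, output_dim) layer, a TanH activation and the MSE loss with the Module API used above; those constructor names and signatures are assumptions, not confirmed by this file.

# Usage sketch -- Linear(2, 8), TanH() and MSE are assumed to exist in Linear.py
# with the forward / backward_update_gradient / backward_delta / update_parameters API.
import numpy as np

X = np.random.randn(200, 2)            # toy inputs
Y = (X[:, :1] * X[:, 1:] > 0) * 1.0    # toy targets, shape (200, 1)

net = Sequential()
net.add_module(Linear(2, 8))   # hypothetical constructor signature
net.add_module(TanH())         # hypothetical activation module
net.add_module(Linear(8, 1))

# Train with mini-batch SGD: batches of 20 samples, 1000 iterations.
trained = SGD(net, X, Y, batch_size=20, loss=MSE, fctsort=None, maxiter=1000)
print(trained.moduleList.histLoss[-1])  # most recent accumulated batch loss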