
Commit 982f565

Add deepsets and basic test (#47)
* Add deepsets and basic test
* Update requirements
* Removed PL tests and callbacks
1 parent 62e9cea commit 982f565

7 files changed: +143 −72

econ_layers/callbacks.py (−15)

This file was deleted.

econ_layers/layers.py (+99)

@@ -153,3 +153,102 @@ def forward(self, input):
             return self.OutputRescalingLayer(input, out)
         else:
             return out
+
+
+class DeepSet(nn.Module):
+    def __init__(
+        self,
+        n_in: int,
+        n_out: int,
+        L: int,
+        phi_layers: int,
+        rho_layers: int,
+        phi_hidden_dim: int = 128,
+        rho_hidden_dim: int = 128,
+        phi_activator: Optional[nn.Module] = lazy_instance(nn.ReLU),
+        phi_hidden_bias: bool = True,
+        phi_last_activator: Optional[nn.Module] = lazy_instance(nn.Identity),
+        phi_last_bias=True,
+        rho_activator: Optional[nn.Module] = lazy_instance(nn.ReLU),
+        rho_hidden_bias: bool = True,
+        rho_last_activator: Optional[nn.Module] = lazy_instance(nn.Identity),
+        rho_last_bias=True,
+        OutputRescalingLayer: Optional[nn.Module] = None,
+        InputRescalingLayer: Optional[nn.Module] = None,
+    ):
+        """
+        Init method.
+        """
+        assert n_in == 1  # only supporting univariate states for now
+        super().__init__()  # init the base class
+        self.rho = FlexibleSequential(
+            L,
+            n_out,
+            rho_layers,
+            rho_hidden_dim,
+            rho_activator,
+            rho_hidden_bias,
+            rho_last_activator,
+            rho_last_bias,
+            OutputRescalingLayer=OutputRescalingLayer,
+        )
+
+        self.phi = FlexibleSequential(
+            n_in,
+            L,
+            phi_layers,
+            phi_hidden_dim,
+            phi_activator,
+            phi_hidden_bias,
+            phi_last_activator,
+            phi_last_bias,
+            InputRescalingLayer=InputRescalingLayer,
+        )
+
+    def forward(self, X):
+        num_batches, N = X.shape
+        phi_X = torch.stack(
+            [torch.mean(self.phi(X[i, :].reshape([N, 1])), 0) for i in range(num_batches)]
+        )
+        return self.rho(phi_X)
+
+
+class DeepSetMoments(nn.Module):
+    def __init__(
+        self,
+        n_in: int,
+        n_out: int,
+        L: int,
+        rho_layers: int,
+        rho_hidden_dim: int = 128,
+        rho_activator: Optional[nn.Module] = lazy_instance(nn.ReLU),
+        rho_hidden_bias: bool = True,
+        rho_last_activator: Optional[nn.Module] = lazy_instance(nn.Identity),
+        rho_last_bias=True,
+        OutputRescalingLayer: Optional[nn.Module] = None,
+    ):
+        """
+        Init method.
+        """
+        assert n_in == 1  # only supporting univariate states for now
+        super().__init__()  # init the base class
+        self.rho = FlexibleSequential(
+            L,
+            n_out,
+            rho_layers,
+            rho_hidden_dim,
+            rho_activator,
+            rho_hidden_bias,
+            rho_last_activator,
+            rho_last_bias,
+            OutputRescalingLayer=OutputRescalingLayer,
+        )
+
+        self.phi = Moments(L)
+
+    def forward(self, X):
+        num_batches, N = X.shape
+        phi_X = torch.stack(
+            [torch.mean(self.phi(X[i, :].reshape([N, 1])), 0) for i in range(num_batches)]
+        )
+        return self.rho(phi_X)
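
Both classes follow the deep-sets pattern: each element of the set is embedded by phi, the embeddings are mean-pooled so the result does not depend on element order, and rho maps the pooled vector to the output (DeepSetMoments simply replaces the learned phi with fixed moments). A minimal self-contained sketch of that pattern, using stand-in MLPs rather than the package's FlexibleSequential:

    # Sketch of the deep-sets pattern above: rho(mean_i phi(x_i)).
    # TinyDeepSet and its MLPs are illustrative stand-ins, not part of econ_layers.
    import torch
    from torch import nn


    class TinyDeepSet(nn.Module):
        def __init__(self, n_in=1, L=2, n_out=2):
            super().__init__()
            self.phi = nn.Sequential(nn.Linear(n_in, 32), nn.ReLU(), nn.Linear(32, L))
            self.rho = nn.Sequential(nn.Linear(L, 32), nn.ReLU(), nn.Linear(32, n_out))

        def forward(self, X):  # X: (num_batches, N), one scalar state per agent
            num_batches, N = X.shape
            # embed each agent's state with phi, then mean-pool over the set
            phi_X = torch.stack(
                [torch.mean(self.phi(X[i, :].reshape([N, 1])), 0) for i in range(num_batches)]
            )
            return self.rho(phi_X)


    mod = TinyDeepSet()
    X = torch.randn(10, 5)
    # reordering the agents within each set leaves the output unchanged
    assert torch.allclose(mod(X), mod(X[:, torch.randperm(5)]), atol=1e-6)

Mean pooling is what buys the permutation invariance; any symmetric pooling (sum, max) would serve the same role.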

requirements.txt (+1 −2)

@@ -1,6 +1,5 @@
-pytorch-lightning >= 1.5.2
 torch >= 1.10
 pytest
 black
 numpy
-jsonargparse >= 4.0.4
+jsonargparse[signatures] >= 4.19
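
The switch to the signatures extra matters because it lets jsonargparse build parsers from type-annotated constructors such as DeepSet.__init__ above (the lazy_instance defaults in layers.py also come from jsonargparse). A hypothetical sketch of that usage, not taken from this commit:

    # Hypothetical: generate a CLI/config parser from DeepSet's typed signature.
    from jsonargparse import ArgumentParser
    from econ_layers.layers import DeepSet

    parser = ArgumentParser()
    parser.add_class_arguments(DeepSet, "deepset")  # introspects the annotations
    cfg = parser.parse_args(
        [
            "--deepset.n_in=1",
            "--deepset.n_out=2",
            "--deepset.L=2",
            "--deepset.phi_layers=2",
            "--deepset.rho_layers=2",
        ]
    )
    init = parser.instantiate_classes(cfg)  # init.deepset is a DeepSet instance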

setup.py (+1 −1)

@@ -57,6 +57,6 @@
     test_suite="tests",
     tests_require=test_requirements,
     url="https://github.com/HighDimensionalEconLab/econ_layers",
-    version="0.0.27",
+    version="0.0.28",
     zip_safe=False,
 )

tests/default_jsonargparse_test_1.yaml (−10)

This file was deleted.

tests/test_deep_sets.py (+42)

@@ -0,0 +1,42 @@
+"""Tests for deep sets layers"""
+
+import pytest
+import torch
+import numpy as np
+from torch import nn
+import numpy.testing
+import torch.autograd.gradcheck
+from torch.autograd import Variable
+
+from econ_layers.layers import (
+    FlexibleSequential,
+    RescaleOutputsByInput,
+    ScalarExponentialRescaling,
+    DeepSet,
+    DeepSetMoments,
+)
+
+torch.set_printoptions(16)  # to be able to see what is going on
+
+
+# Unit testing of the autodiff. Easy here, but need to be more advanced later
+def test_deep_set_derivative():
+    n_in = 1  # one dimensional state per "agent". Only n_in = 1 is supported right now
+    N = 5  # number of "agents"
+    n_out = 2
+    mod = DeepSet(
+        n_in, n_out, L=2, phi_layers=2, phi_hidden_dim=32, rho_layers=2, rho_hidden_dim=32
+    ).double()
+    batches = 10
+    input = (Variable(torch.randn(N, batches).double(), requires_grad=True),)
+    assert torch.autograd.gradcheck(mod, input)
+
+
+def test_deep_set_moments_derivative():
+    n_in = 1  # one dimensional state per "agent". Only n_in = 1 is supported right now
+    N = 5  # number of "agents"
+    n_out = 2
+    num_moments = 3
+    mod = DeepSetMoments(n_in, n_out, L=num_moments, rho_layers=2, rho_hidden_dim=32).double()
+    batches = 10
+    input = (Variable(torch.randn(N, batches).double(), requires_grad=True),)
+    assert torch.autograd.gradcheck(mod, input)
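
gradcheck verifies the double-precision gradients of each module end to end. (Note the inputs are built as torch.randn(N, batches) while forward unpacks num_batches, N = X.shape; both dimensions are arbitrary here, so the check is unaffected.) A natural companion test, sketched below but not part of this commit, would assert the permutation invariance that defines a deep set:

    # Hypothetical companion test: permuting agents within each set
    # must leave the DeepSet output unchanged.
    def test_deep_set_permutation_invariance():
        mod = DeepSet(
            1, 2, L=2, phi_layers=2, phi_hidden_dim=32, rho_layers=2, rho_hidden_dim=32
        ).double()
        X = torch.randn(10, 5).double()  # 10 batches, N = 5 agents each
        X_shuffled = X[:, torch.randperm(5)]  # reorder agents within every set
        assert torch.allclose(mod(X), mod(X_shuffled))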

tests/test_jsonargparse.py (−44)

This file was deleted.
