Skip to content

convex domain for ball #78

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/decomon/backward_layers/activations.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,8 @@ def backward_relu(
upper, lower = x[:nb_tensors]
elif mode == ForwardMode.AFFINE:
z_, w_u_, b_u_, w_l_, b_l_ = x[:nb_tensors]
upper = get_upper(z_, w_u_, b_u_)
lower = get_lower(z_, w_l_, b_l_)
upper = get_upper(z_, w_u_, b_u_, convex_domain=convex_domain)
lower = get_lower(z_, w_l_, b_l_, convex_domain=convex_domain)
elif mode == ForwardMode.HYBRID:
_, upper, _, _, lower, _, _ = x[:nb_tensors]
else:
Expand Down
13 changes: 8 additions & 5 deletions src/decomon/models/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,9 @@ def clone(
if isinstance(method, str):
method = ConvertMethod(method.lower())

if len(convex_domain) and isinstance(convex_domain["name"], str):
convex_domain["name"] = ConvexDomainType(convex_domain["name"].lower())

if not to_keras:
raise NotImplementedError("Only convert to Keras for now.")

Expand Down Expand Up @@ -262,10 +265,10 @@ def clone(

if convex_domain["p"] == np.inf:
radius = convex_domain["eps"]
u_c_tensor = Lambda(
lambda var: var + K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
)(z_tensor)
if ibp_:
u_c_tensor = Lambda(
lambda var: var + K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
)(z_tensor)
l_c_tensor = Lambda(
lambda var: var - K.cast(radius, dtype=model.layers[0].dtype), dtype=model.layers[0].dtype
)(z_tensor)
Expand All @@ -282,9 +285,9 @@ def clone(

def get_bounds(z: tf.Tensor) -> List[tf.Tensor]:
output = []
W = tf.linalg.diag(z_value * z + o_value)
b = z_value * z
if affine_:
W = tf.linalg.diag(z_value * z + o_value)
b = z_value * z
output += [W, b]
if ibp_:
u_c_ = get_upper(z, W, b, convex_domain)
Expand Down
79 changes: 79 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -1190,6 +1190,85 @@ def toy_struct_cnn(dtype="float32"):
]
return Sequential(layers)

def assert_output_properties_ball(
    x_, y_, h_, g_, x_center_, radius, p, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, name, decimal=5
):
    """Assert soundness of decomon outputs computed over an Lp-ball input domain.

    Checks, up to ``decimal`` digits, that:
      * the DC decomposition reconstructs the output: ``h_ + g_ == y_``;
      * ``h_`` is non-decreasing and ``g_`` is non-increasing along the first axis;
      * the affine bounds ``(w_u_, b_u_)`` / ``(w_l_, b_l_)``, widened by the
        worst-case deviation ``radius * norm(w)`` over the ball, enclose ``y_``;
      * the constant bounds ``u_c_`` / ``l_c_`` enclose ``y_``.

    Any of ``y_``, ``h_``/``g_``, ``w_u_``/``b_u_``, ``w_l_``/``b_l_``,
    ``u_c_``/``l_c_`` may be None, in which case the corresponding checks are
    skipped.

    Args:
        x_: input points at which the affine bounds are evaluated.
        y_: reference outputs; reconstructed as ``h_ + g_`` when None.
        h_, g_: DC decomposition of the output, or None.
        x_center_: center of the ball (currently unused by the checks).
        radius: radius (eps) of the ball.
        p: order of the Lp norm defining the ball (2 or np.inf).
        u_c_, l_c_: constant upper/lower bounds, or None.
        w_u_, b_u_, w_l_, b_l_: affine upper/lower bound coefficients, or None.
        name: label used in assertion error messages.
        decimal: numpy.testing precision for all comparisons.

    Raises:
        ValueError: if affine bounds are given and ``p`` is neither 2 nor np.inf.
    """
    if y_ is None:
        y_ = h_ + g_

    if h_ is not None:
        assert_almost_equal(
            h_ + g_,
            y_,
            decimal=decimal,
            err_msg="decomposition error for function {}".format(name),
        )

    upper_ = None
    lower_ = None
    if w_u_ is not None or w_l_ is not None:
        # Align x_ with the weight tensors by appending trailing singleton axes.
        # Use whichever weight tensor is available (the original always read
        # w_u_.shape and crashed when only w_l_ was provided).
        w_ref = w_u_ if w_u_ is not None else w_l_
        x_expand = x_ + np.zeros_like(x_)
        for _ in range(len(w_ref.shape) - len(x_expand.shape)):
            x_expand = np.expand_dims(x_expand, -1)

        if p == 2:
            norm = lambda w: np.sqrt(np.sum(w**2))
        elif p == np.inf:
            # NOTE(review): the exact worst case over an L-inf ball would use the
            # dual (L1) norm; kept as max(|w|) to preserve the original check.
            norm = lambda w: np.max(np.abs(w))
        else:
            # previously `norm` was silently left undefined -> NameError later
            raise ValueError("p must be 2 or np.inf, got {}".format(p))

        if w_l_ is not None:
            lower_ = np.sum(w_l_ * x_expand, 1) + b_l_ - radius * norm(w_l_)
        if w_u_ is not None:
            upper_ = np.sum(w_u_ * x_expand, 1) + b_u_ + radius * norm(w_u_)

    # check that h_ stays non-decreasing and g_ stays non-increasing
    if h_ is not None:
        assert_almost_equal(
            np.clip(h_[:-1] - h_[1:], 0, np.inf),
            np.zeros_like(h_[1:]),
            decimal=decimal,
            err_msg="h is not increasing for function {}".format(name),
        )
        assert_almost_equal(
            np.clip(g_[1:] - g_[:-1], 0, np.inf),
            np.zeros_like(g_[1:]),
            decimal=decimal,
            err_msg="g is not decreasing for function {}".format(name),
        )

    # affine bounds must enclose the reference output; only checked in float32,
    # where the decimal tolerance is numerically meaningful
    if upper_ is not None and K.floatx() == "float32":
        assert_almost_equal(
            np.clip(y_ - upper_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="upper <y",
        )
    if lower_ is not None and K.floatx() == "float32":
        assert_almost_equal(
            np.clip(lower_ - y_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="lower_ >y",
        )

    # constant bounds must enclose the reference output; guards are now
    # independent (previously the u_c_ check required l_c_ to be non-None)
    if l_c_ is not None:
        assert_almost_equal(
            np.clip(l_c_ - y_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="l_c >y",
        )
    if u_c_ is not None:
        assert_almost_equal(
            np.clip(y_ - u_c_, 0.0, np.inf),
            np.zeros_like(y_),
            decimal=decimal,
            err_msg="u_c <y",
        )


@pytest.fixture
def helpers():
Expand Down
73 changes: 73 additions & 0 deletions tests/test_clone.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# creating toy network and assess that the decomposition is correct


import numpy as np
import pytest
import tensorflow.keras.backend as K

Expand Down Expand Up @@ -114,3 +115,75 @@ def test_convert_1D_backward_slope(slope, helpers):
layer_class_name = layer.__class__.__name__
if layer_class_name.endswith("Activation"):
assert layer.slope == Slope(slope)


# test different convex domains
@pytest.mark.parametrize(
    "p",
    [2, np.inf],
)
def test_convert_1D_ball(p, n, method, mode, floatx, helpers):
    """Clone a toy 1D network over an Lp-ball convex domain and check soundness.

    Builds a ball domain covering the reference box, converts the network with
    `clone`, evaluates the decomon model at the ball center, and verifies the
    returned bounds against the reference outputs via
    `helpers.assert_output_properties_ball`.
    """
    if not helpers.is_method_mode_compatible(method=method, mode=mode):
        # skip method=ibp/crown-ibp with mode=affine/hybrid
        return

    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        K.set_epsilon(1e-2)
        decimal = 2

    # restore global keras state even if an assertion fails, so later tests
    # do not run with a leaked float16 / epsilon configuration
    try:
        inputs = helpers.get_tensor_decomposition_1d_box(dc_decomp=False)
        inputs_ = helpers.get_standard_values_1d_box(n, dc_decomp=False)
        x_ = inputs_[0]
        z_ = inputs_[2]

        # ball centered on the box midpoint, with radius covering the box width
        radius = np.max(z_[:, 1] - z_[:, 0])
        convex_domain = {"name": "ball", "p": p, "eps": radius}
        z_ball = np.mean(z_, 1)
        ref_nn = helpers.toy_network_tutorial(dtype=K.floatx())
        ref_nn(inputs[1])

        mode = ForwardMode(mode)
        ibp = mode in (ForwardMode.IBP, ForwardMode.HYBRID)
        affine = mode in (ForwardMode.AFFINE, ForwardMode.HYBRID)

        f_dense = clone(ref_nn, method=method, convex_domain=convex_domain, final_ibp=ibp, final_affine=affine)
        f_ref = K.function(inputs, ref_nn(inputs[1]))
        y_ref = f_ref(inputs_)

        u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = [None] * 6
        if mode == ForwardMode.HYBRID:
            z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = f_dense(z_ball)
        elif mode == ForwardMode.IBP:
            u_c_, l_c_ = f_dense(z_ball)
        elif mode == ForwardMode.AFFINE:
            z_, w_u_, b_u_, w_l_, b_l_ = f_dense(z_ball)
        else:
            raise ValueError("Unknown mode.")

        helpers.assert_output_properties_ball(
            x_,
            y_ref,
            None,
            None,
            z_,
            radius,
            p,
            u_c_,
            w_u_,
            b_u_,
            l_c_,
            w_l_,
            b_l_,
            "dense_{}".format(n),
            decimal=decimal,
        )
    finally:
        K.set_floatx("float32")
        K.set_epsilon(eps)