Replace .numpy() (tf specific) by keras.ops.convert_to_numpy()
nhuet committed Jan 8, 2024
1 parent baf51b5 commit 8ddd0b2
Showing 8 changed files with 38 additions and 36 deletions.
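The change is mechanical but worth a word of context: `.numpy()` is a method of the backend tensor object (the commit title flags it as TensorFlow-specific), so it is not guaranteed to exist when Keras 3 runs on another backend, whereas `keras.ops.convert_to_numpy()` is the backend-agnostic conversion in the Keras 3 API. A minimal sketch of the before/after (shapes chosen arbitrarily for illustration):

import keras.ops as K
import numpy as np

x = K.ones((2, 3))  # backend-native tensor (TensorFlow, JAX, or torch)

# before: tf-specific, fails if the active backend's tensors lack .numpy()
# x_np = x.numpy()

# after: works on every Keras 3 backend
x_np = K.convert_to_numpy(x)
assert isinstance(x_np, np.ndarray)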
5 changes: 3 additions & 2 deletions src/decomon/models/models.py
@@ -1,6 +1,7 @@
 from typing import Any, Dict, List, Optional, Union

 import keras
+import keras.ops as K
 import numpy as np
 from keras import Model
 from keras.utils import serialize_keras_object
@@ -113,9 +114,9 @@ def predict_on_single_batch_np(
         """
         output_tensors = self(inputs)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)


 def _check_domain(
8 changes: 4 additions & 4 deletions tests/conftest.py
@@ -164,9 +164,9 @@ def __init__(self, inputs: List[KerasTensor], outputs: List[KerasTensor]):
     def __call__(self, inputs_: List[np.ndarray]):
         output_tensors = self._model(inputs_)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)


 class Helpers:
@@ -220,9 +220,9 @@ def predict_on_small_numpy(
         """
         output_tensors = model(x)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)

     @staticmethod
     def get_standard_values_1d_box(n, dc_decomp=True, grad_bounds=False, nb=100):
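The list-or-single-tensor branch above now appears in three spots (once in models.py, twice in conftest.py). As a sketch only, with a helper name of our own that does not exist in the commit, the shared pattern boils down to:

from typing import List, Union

import keras.ops as K
import numpy as np

def to_numpy(outputs) -> Union[np.ndarray, List[np.ndarray]]:
    # Hypothetical helper (not part of this commit): convert a tensor,
    # or a list of tensors, to numpy on any Keras 3 backend.
    if isinstance(outputs, list):
        return [K.convert_to_numpy(output) for output in outputs]
    return K.convert_to_numpy(outputs)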
2 changes: 1 addition & 1 deletion tests/test_activation.py
@@ -37,7 +37,7 @@ def test_activation_1D_box(n, mode, floatx, decimal, helpers, activation_func, t
     input_ref_ = helpers.get_input_ref_from_full_inputs(inputs=inputs_)

     # reference output
-    output_ref_ = tensor_func(input_ref_).numpy()
+    output_ref_ = K.convert_to_numpy(tensor_func(input_ref_))

     # decomon output
     output = activation_func(inputs_for_mode, dc_decomp=dc_decomp, mode=mode)
12 changes: 6 additions & 6 deletions tests/test_clone.py
@@ -258,9 +258,9 @@ def test_clone_full_deellip_model_forward(method, mode, helpers):

     # flatten inputs
     preprocess_layer = Flatten(data_format=data_format)
-    input_ref_reshaped_ = preprocess_layer(input_ref_).numpy()
-    input_ref_min_reshaped_ = preprocess_layer(input_ref_min_).numpy()
-    input_ref_max_reshaped_ = preprocess_layer(input_ref_max_).numpy()
+    input_ref_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_))
+    input_ref_min_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_min_))
+    input_ref_max_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_max_))

     # decomon inputs
     input_decomon_ = np.concatenate((input_ref_min_reshaped_[:, None], input_ref_max_reshaped_[:, None]), axis=1)
@@ -392,9 +392,9 @@ def test_convert_cnn(method, mode, helpers):

     # flatten inputs
     preprocess_layer = Flatten(data_format=data_format)
-    input_ref_reshaped_ = preprocess_layer(input_ref_).numpy()
-    input_ref_min_reshaped_ = preprocess_layer(input_ref_min_).numpy()
-    input_ref_max_reshaped_ = preprocess_layer(input_ref_max_).numpy()
+    input_ref_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_))
+    input_ref_min_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_min_))
+    input_ref_max_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_max_))

     input_decomon_ = np.concatenate((input_ref_min_reshaped_[:, None], input_ref_max_reshaped_[:, None]), axis=1)

16 changes: 8 additions & 8 deletions tests/test_decomon_reset_layer.py
@@ -35,11 +35,11 @@ def test_decomondense_reset_layer(helpers, use_bias):

     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
     if use_bias:
         assert len(layer.weights) == 2
         assert decomon_layer.bias is not layer.bias
-        assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+        assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))
     else:
         assert len(layer.weights) == 1

@@ -70,8 +70,8 @@ def test_decomondense_reset_layer_decomon_with_new_weights(helpers):
     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
     assert decomon_layer.bias is not layer.bias
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
-    assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 def test_decomondense_reset_layer_keras_with_new_weights(helpers):
@@ -99,8 +99,8 @@ def test_decomondense_reset_layer_keras_with_new_weights(helpers):
     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
     assert decomon_layer.bias is not layer.bias
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
-    assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 def test_decomondense_reset_layer_ko_keraslayer_not_nuilt():
@@ -155,10 +155,10 @@ def test_decomonconv2d_reset_layer(helpers, use_bias):

     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
     if use_bias:
         assert decomon_layer.bias is not layer.bias
-        assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+        assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 @pytest.mark.parametrize(
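Note that in these tests `K.convert_to_numpy` is applied to `keras.Variable` weights (`kernel`, `bias`), not just to tensors. A quick self-contained check of that behavior (layer and shapes are our own example, not from the diff):

import keras
import keras.ops as K
import numpy as np

layer = keras.layers.Dense(4)
layer.build((None, 3))  # creates kernel (3, 4) and bias (4,) variables
kernel_np = K.convert_to_numpy(layer.kernel)  # variables convert too
assert isinstance(kernel_np, np.ndarray)
assert kernel_np.shape == (3, 4)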
16 changes: 8 additions & 8 deletions tests/test_models_utils.py
@@ -67,8 +67,8 @@ def test_split_activation_do_split(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     original_layer_weights = layer.get_weights()
@@ -158,8 +158,8 @@ def test_split_activation_do_split_with_deellip(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     original_layer_weights = layer.get_weights()
@@ -233,8 +233,8 @@ def test_convert_deellip_to_keras_spectraldense():
     assert keras_layer.name.startswith(layer.name)
     # same output?
     input_tensor = K.ones((4, 1))
-    output_ref = layer(input_tensor).numpy()
-    new_output = keras_layer(input_tensor).numpy()
+    output_ref = K.convert_to_numpy(layer(input_tensor))
+    new_output = K.convert_to_numpy(keras_layer(input_tensor))
     assert_almost_equal(new_output, output_ref)
     # idempotency
     keras_layer2 = convert_deellip_to_keras(keras_layer)
@@ -371,8 +371,8 @@ def test_preprocess_layer_nonlinear_activation(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     if not is_deellip_layer:
12 changes: 6 additions & 6 deletions tests/test_preprocess_keras_model.py
@@ -52,8 +52,8 @@ def test_split_activations_in_keras_model(toy_model):
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = toy_model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(toy_model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)


@@ -92,8 +92,8 @@ def test_convert_deellip_layers_in_keras_model_ok():
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)


@@ -164,6 +164,6 @@ def test_preprocess(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)
3 changes: 2 additions & 1 deletion tutorials/tutorial4_certified_over_estimation.ipynb
@@ -165,6 +165,7 @@
     "\n",
     "%matplotlib inline\n",
     "import ipywidgets as widgets\n",
+    "import keras.ops\n",
     "import numpy as np\n",
     "from ipywidgets import interact\n",
     "from keras.layers import Activation, Dense\n",
@@ -382,7 +383,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "bias = model.layers[-1].bias.numpy()"
+    "bias = keras.ops.convert_to_numpy(model.layers[-1].bias)"
    ]
   },
   {
