diff --git a/src/decomon/models/models.py b/src/decomon/models/models.py
index a485c90f..b1ff067c 100644
--- a/src/decomon/models/models.py
+++ b/src/decomon/models/models.py
@@ -1,6 +1,7 @@
 from typing import Any, Dict, List, Optional, Union

 import keras
+import keras.ops as K
 import numpy as np
 from keras import Model
 from keras.utils import serialize_keras_object
@@ -113,9 +114,9 @@ def predict_on_single_batch_np(
         """
         output_tensors = self(inputs)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)


 def _check_domain(
diff --git a/tests/conftest.py b/tests/conftest.py
index 14c3967e..e3c9aa6c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -164,9 +164,9 @@ def __init__(self, inputs: List[KerasTensor], outputs: List[KerasTensor]):
     def __call__(self, inputs_: List[np.ndarray]):
         output_tensors = self._model(inputs_)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)


 class Helpers:
@@ -220,9 +220,9 @@ def predict_on_small_numpy(
         """
         output_tensors = model(x)
         if isinstance(output_tensors, list):
-            return [output.numpy() for output in output_tensors]
+            return [K.convert_to_numpy(output) for output in output_tensors]
         else:
-            return output_tensors.numpy()
+            return K.convert_to_numpy(output_tensors)

     @staticmethod
     def get_standard_values_1d_box(n, dc_decomp=True, grad_bounds=False, nb=100):
diff --git a/tests/test_activation.py b/tests/test_activation.py
index 9f543dd9..c41b7f36 100644
--- a/tests/test_activation.py
+++ b/tests/test_activation.py
@@ -37,7 +37,7 @@ def test_activation_1D_box(n, mode, floatx, decimal, helpers, activation_func, t
     input_ref_ = helpers.get_input_ref_from_full_inputs(inputs=inputs_)

     # reference output
-    output_ref_ = tensor_func(input_ref_).numpy()
+    output_ref_ = K.convert_to_numpy(tensor_func(input_ref_))

     # decomon output
     output = activation_func(inputs_for_mode, dc_decomp=dc_decomp, mode=mode)
diff --git a/tests/test_clone.py b/tests/test_clone.py
index 127a8ffb..45e52eb1 100644
--- a/tests/test_clone.py
+++ b/tests/test_clone.py
@@ -258,9 +258,9 @@ def test_clone_full_deellip_model_forward(method, mode, helpers):

     # flatten inputs
     preprocess_layer = Flatten(data_format=data_format)
-    input_ref_reshaped_ = preprocess_layer(input_ref_).numpy()
-    input_ref_min_reshaped_ = preprocess_layer(input_ref_min_).numpy()
-    input_ref_max_reshaped_ = preprocess_layer(input_ref_max_).numpy()
+    input_ref_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_))
+    input_ref_min_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_min_))
+    input_ref_max_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_max_))

     # decomon inputs
     input_decomon_ = np.concatenate((input_ref_min_reshaped_[:, None], input_ref_max_reshaped_[:, None]), axis=1)
@@ -392,9 +392,9 @@ def test_convert_cnn(method, mode, helpers):

     # flatten inputs
     preprocess_layer = Flatten(data_format=data_format)
-    input_ref_reshaped_ = preprocess_layer(input_ref_).numpy()
-    input_ref_min_reshaped_ = preprocess_layer(input_ref_min_).numpy()
-    input_ref_max_reshaped_ = preprocess_layer(input_ref_max_).numpy()
+    input_ref_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_))
+    input_ref_min_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_min_))
+    input_ref_max_reshaped_ = K.convert_to_numpy(preprocess_layer(input_ref_max_))

     input_decomon_ = np.concatenate((input_ref_min_reshaped_[:, None], input_ref_max_reshaped_[:, None]), axis=1)

diff --git a/tests/test_decomon_reset_layer.py b/tests/test_decomon_reset_layer.py
index 21bcc592..ce0634f4 100644
--- a/tests/test_decomon_reset_layer.py
+++ b/tests/test_decomon_reset_layer.py
@@ -35,11 +35,11 @@ def test_decomondense_reset_layer(helpers, use_bias):

     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
     if use_bias:
         assert len(layer.weights) == 2
         assert decomon_layer.bias is not layer.bias
-        assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+        assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))
     else:
         assert len(layer.weights) == 1

@@ -70,8 +70,8 @@ def test_decomondense_reset_layer_decomon_with_new_weights(helpers):
     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
     assert decomon_layer.bias is not layer.bias
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
-    assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 def test_decomondense_reset_layer_keras_with_new_weights(helpers):
@@ -99,8 +99,8 @@ def test_decomondense_reset_layer_keras_with_new_weights(helpers):
     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
     assert decomon_layer.bias is not layer.bias
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
-    assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 def test_decomondense_reset_layer_ko_keraslayer_not_nuilt():
@@ -155,10 +155,10 @@ def test_decomonconv2d_reset_layer(helpers, use_bias):

     decomon_layer.reset_layer(layer)
     assert decomon_layer.kernel is not layer.kernel
-    assert_almost_equal(decomon_layer.kernel.numpy(), layer.kernel.numpy())
+    assert_almost_equal(K.convert_to_numpy(decomon_layer.kernel), K.convert_to_numpy(layer.kernel))
     if use_bias:
         assert decomon_layer.bias is not layer.bias
-        assert_almost_equal(decomon_layer.bias.numpy(), layer.bias.numpy())
+        assert_almost_equal(K.convert_to_numpy(decomon_layer.bias), K.convert_to_numpy(layer.bias))


 @pytest.mark.parametrize(
diff --git a/tests/test_models_utils.py b/tests/test_models_utils.py
index 39fce69e..80f3eab5 100644
--- a/tests/test_models_utils.py
+++ b/tests/test_models_utils.py
@@ -67,8 +67,8 @@ def test_split_activation_do_split(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     original_layer_weights = layer.get_weights()
@@ -158,8 +158,8 @@ def test_split_activation_do_split_with_deellip(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     original_layer_weights = layer.get_weights()
@@ -233,8 +233,8 @@ def test_convert_deellip_to_keras_spectraldense():
     assert keras_layer.name.startswith(layer.name)
     # same output?
     input_tensor = K.ones((4, 1))
-    output_ref = layer(input_tensor).numpy()
-    new_output = keras_layer(input_tensor).numpy()
+    output_ref = K.convert_to_numpy(layer(input_tensor))
+    new_output = K.convert_to_numpy(keras_layer(input_tensor))
     assert_almost_equal(new_output, output_ref)
     # idempotency
     keras_layer2 = convert_deellip_to_keras(keras_layer)
@@ -371,8 +371,8 @@ def test_preprocess_layer_nonlinear_activation(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = layer(inputs_np).numpy()
-    output_np_new = activation_layer(layer_wo_activation(inputs_np)).numpy()
+    output_np_ref = K.convert_to_numpy(layer(inputs_np))
+    output_np_new = K.convert_to_numpy(activation_layer(layer_wo_activation(inputs_np)))
     assert_almost_equal(output_np_new, output_np_ref)
     # check same trainable weights
     if not is_deellip_layer:
diff --git a/tests/test_preprocess_keras_model.py b/tests/test_preprocess_keras_model.py
index 48fb77b7..e1e7d116 100644
--- a/tests/test_preprocess_keras_model.py
+++ b/tests/test_preprocess_keras_model.py
@@ -52,8 +52,8 @@ def test_split_activations_in_keras_model(toy_model):
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = toy_model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(toy_model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)


@@ -92,8 +92,8 @@ def test_convert_deellip_layers_in_keras_model_ok():
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)


@@ -164,6 +164,6 @@ def test_preprocess(
     input_shape_with_batch_size = (5,) + input_shape_wo_batchsize
     flatten_dim = np.prod(input_shape_with_batch_size)
     inputs_np = np.linspace(-1, 1, flatten_dim).reshape(input_shape_with_batch_size)
-    output_np_ref = model(inputs_np).numpy()
-    output_np_new = converted_model(inputs_np).numpy()
+    output_np_ref = K.convert_to_numpy(model(inputs_np))
+    output_np_new = K.convert_to_numpy(converted_model(inputs_np))
     assert_almost_equal(output_np_new, output_np_ref, decimal=4)
diff --git a/tutorials/tutorial4_certified_over_estimation.ipynb b/tutorials/tutorial4_certified_over_estimation.ipynb
index e1ef83a7..c8bbf577 100644
--- a/tutorials/tutorial4_certified_over_estimation.ipynb
+++ b/tutorials/tutorial4_certified_over_estimation.ipynb
@@ -165,6 +165,7 @@
     "\n",
     "%matplotlib inline\n",
     "import ipywidgets as widgets\n",
+    "import keras.ops\n",
     "import numpy as np\n",
     "from ipywidgets import interact\n",
     "from keras.layers import Activation, Dense\n",
@@ -382,7 +383,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "bias = model.layers[-1].bias.numpy()"
+    "bias = keras.ops.convert_to_numpy(model.layers[-1].bias)"
    ]
   },
   {
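
For reference, the conversion pattern this patch standardizes on can be exercised outside the diff with a few lines of backend-agnostic Python. This is an illustrative sketch, not part of the patch: it assumes Keras 3, where `keras.ops.convert_to_numpy` works under every backend, unlike the TensorFlow-specific `Tensor.numpy()` method it replaces here.

# Illustrative sketch (not part of the patch): the backend-agnostic
# conversion pattern adopted throughout this diff.
import keras.ops as K
import numpy as np

x = K.ones((2, 3))            # backend-native tensor (TF, torch, JAX, or numpy)
x_np = K.convert_to_numpy(x)  # always yields a np.ndarray, whatever the backend
assert isinstance(x_np, np.ndarray)

# Multi-output models return a list of tensors; convert element-wise,
# mirroring predict_on_single_batch_np above.
outputs = [K.zeros((1,)), K.ones((1,))]
outputs_np = [K.convert_to_numpy(o) for o in outputs]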