diff --git a/.vscode/launch.json b/.vscode/launch.json index c0ab2348..0be7bc2a 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -146,7 +146,7 @@ { "name": "AgingEvolution", - "type": "python", + "type": "debugpy", "request": "launch", "module": "hannah.tools.train", "justMyCode": false, @@ -162,12 +162,26 @@ "trainer.overfit_batches=1", "nas.n_jobs=1", "nas.budget=100", - "nas.total_candidates=50", - "nas.num_selected_candidates=20", - "nas.sampler.population_size=20", - // "nas.predictor.model.input_feature_size=31", + "nas.total_candidates=5", + "nas.num_selected_candidates=2", + "nas.sampler.population_size=2", + // "nas.predictor.model.input_feature_size=35", "module.num_workers=8", - "experiment_id=test_constraint_config", + "experiment_id=test_merge", + "fx_mac_summary=True", + // "~nas.predictor", + "~normalizer" + ] + }, + { + "name": "ChainedLinear", + "type": "debugpy", + "request": "launch", + "module": "hannah.tools.train", + "justMyCode": false, + "cwd": "${workspaceFolder}/experiments/kws", + "args": [ + "+experiment=ae_nas", "fx_mac_summary=True", // "~nas.predictor", "~normalizer" diff --git a/doc/nas/search_spaces.md b/doc/nas/search_spaces.md index 80194e92..74584873 100644 --- a/doc/nas/search_spaces.md +++ b/doc/nas/search_spaces.md @@ -20,6 +20,16 @@ from hannah.nas.functional_operators.operators import Conv2d ## Basic Building Blocks +### Search Space Wrapper +To define the beginning and end of a search space, the definition has to be enclosed in a function returning the (last node of the) search space graph. +This function must use the `@search_space` decorator to indicate that this is the main search space enclosing function. + + + +```python +from hannah.nas.functional_operators.op import search_space +``` + ### Ops & Tensors **Op** nodes represent the operators used in the networks of the search space. 
Their basic syntax is @@ -38,18 +48,22 @@ defines attributes that the data has at this point in the graph (e.g., shape, ax from hannah.nas.functional_operators.operators import Conv2d from hannah.nas.functional_operators.op import Tensor -input = Tensor(name='input', shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) -weight = Tensor(name='weight', shape=(32, 3, 1, 1), axis=("O", "I", "kH", "kW")) +@search_space +def simple_search_space(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) + weight = Tensor(name='weight', shape=(32, 3, 1, 1), axis=("O", "I", "kH", "kW")) -conv = Conv2d(stride=2, dilation=1) # Define operator and parametrization -graph = conv(input, weight) # Define/create/extend graph + conv = Conv2d(stride=2, dilation=1) # Define operator and parametrization + graph = conv(input, weight) # Define/create/extend graph + return graph +graph = simple_search_space() graph ``` - Conv2d(Conv2d_0) + Conv2d(simple_search_space_0.Conv2d_0) @@ -76,19 +90,23 @@ To build a search space it is not sufficient to feed scalar values to operator p ```python from hannah.nas.parameters.parameters import CategoricalParameter, IntScalarParameter -input = Tensor(name='input', shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) -weight = Tensor(name='weight', shape=(IntScalarParameter(min=8, max=64, name='out_channels'), 3, 1, 1), axis=("O", "I", "kH", "kW")) +@search_space +def simple_parametrized_search_space(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) + weight = Tensor(name='weight', shape=(IntScalarParameter(min=8, max=64, name='out_channels'), 3, 1, 1), axis=("O", "I", "kH", "kW")) -# a search space with stride 1 and stride 2 convolutions -graph = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight) + # a search space with stride 1 and stride 2 convolutions + graph = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight) + return graph +graph = 
simple_parametrized_search_space() graph.parametrization(flatten=True) ``` - {'Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), - None: CategoricalParameter(rng = Generator(PCG64), name = stride, id = None, choices = [1, 2], current_value = 1)} + {'simple_parametrized_search_space_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = simple_parametrized_search_space_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 2), + 'simple_parametrized_search_space_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = simple_parametrized_search_space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8)} @@ -97,16 +115,20 @@ encoding properties of the search space symbolically. One common use-case is sym ```python -in_channel = 3 +in_channel = 3 kernel_size = 1 input = Tensor(name='input', shape=(1, in_channel, 32, 32), axis=('N', 'C', 'H', 'W')) -weight_0 = Tensor(name='weight', - shape=(IntScalarParameter(min=8, max=64, name='out_channels'), in_channel, kernel_size, kernel_size), - axis=("O", "I", "kH", "kW")) -conv_0 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight_0) +@search_space +def simple_search_space(input): + weight_0 = Tensor(name='weight', + shape=(IntScalarParameter(min=8, max=64, name='out_channels'), in_channel, kernel_size, kernel_size), + axis=("O", "I", "kH", "kW")) + conv_0 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight_0) + return conv_0 +out = simple_search_space(input) ``` How can we know the output shape of `conv_0`, e.g., to put it into the weight tensor of a following convolution, without knowing what value @@ -117,13 +139,13 @@ are then only evaluated at sampling and during the forward. 
```python print("Input shape: ", input.shape()) -print("Weight shape: ", weight_0.shape()) -print("Convolution output shape:", conv_0.shape()) +print("Weight shape: ", out.operands[1].shape()) +print("Convolution output shape:", out.shape()) ``` Input shape: (1, 3, 32, 32) - Weight shape: (IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), 3, 1, 1) - Convolution output shape: (1, IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), , ) + Weight shape: (IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = simple_search_space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8), 3, 1, 1) + Convolution output shape: (1, IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = simple_search_space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8), , ) The `lazy` keyword can be used to evaluate values which *might* be parameters (but could also be `int` or else). 
@@ -134,8 +156,8 @@ from hannah.nas.functional_operators.lazy import lazy print("Input shape: ", [lazy(i) for i in input.shape()]) -print("Weight shape: ", [lazy(i) for i in weight_0.shape()]) -print("Convolution output shape:", [lazy(i) for i in conv_0.shape()]) +print("Weight shape: ", [lazy(i) for i in out.operands[1].shape()]) +print("Convolution output shape:", [lazy(i) for i in out.shape()]) ``` Input shape: [1, 3, 32, 32] @@ -153,33 +175,38 @@ As seen in the simple examples above, we can chain op and tensor nodes together ```python from hannah.nas.functional_operators.operators import Relu -input = Tensor(name='input', - shape=(1, 3, 32, 32), - axis=('N', 'C', 'H', 'W')) -weight_0 = Tensor(name='weight', shape=(IntScalarParameter(min=8, max=64, name='out_channels'), 3, 1, 1), axis=("O", "I", "kH", "kW")) +@search_space +def simple_search_space(): + input = Tensor(name='input', + shape=(1, 3, 32, 32), + axis=('N', 'C', 'H', 'W')) -conv_0 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight_0) -relu_0 = Relu()(conv_0) + weight_0 = Tensor(name='weight', shape=(IntScalarParameter(min=8, max=64, name='out_channels'), 3, 1, 1), axis=("O", "I", "kH", "kW")) -weight_1 = Tensor(name='weight', shape=(IntScalarParameter(min=32, max=64, name='out_channels'), conv_0.shape()[1], 3, 3), axis=("O", "I", "kH", "kW")) -conv_1 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(relu_0, weight_1) -relu_1 = Relu()(conv_1) + conv_0 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight_0) + relu_0 = Relu()(conv_0) + + weight_1 = Tensor(name='weight', shape=(IntScalarParameter(min=32, max=64, name='out_channels'), conv_0.shape()[1], 3, 3), axis=("O", "I", "kH", "kW")) + conv_1 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(relu_0, weight_1) + relu_1 = Relu()(conv_1) + return relu_1 +out = simple_search_space() ``` ```python -relu_1.parametrization(flatten=True) 
+out.parametrization(flatten=True) ``` - {'Conv2d_1.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_1.weight.out_channels, min = 32, max = 64, step_size = 1, current_value = 32), - 'Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), - 'Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = Conv2d_0.stride, choices = [1, 2], current_value = 2), - 'Conv2d_1.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = Conv2d_1.stride, choices = [1, 2], current_value = 2)} + {'simple_search_space_0.Conv2d_1.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = simple_search_space_0.Conv2d_1.stride, _registered = True, choices = [1, 2], current_value = 2), + 'simple_search_space_0.Conv2d_1.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = simple_search_space_0.Conv2d_1.weight.out_channels, _registered = True, min = 32, max = 64, step_size = 1, current_value = 32), + 'simple_search_space_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = simple_search_space_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 2), + 'simple_search_space_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = simple_search_space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8)} @@ -190,33 +217,33 @@ With helper functions like `get_nodes` one can iterate through all graph nodes. 
```python from hannah.nas.functional_operators.op import get_nodes -print("Relu Operands: ", relu_1.operands) -print("Conv Users: ", relu_1.operands[0].users) +print("Relu Operands: ", out.operands) +print("Conv Users: ", out.operands[0].users) print("\nNodes:") -for node in get_nodes(relu_1): +for node in get_nodes(out): print('Node:', node) print('\tOperands: ', node.operands) ``` - Relu Operands: [Conv2d(Conv2d_1)] - Conv Users: [Relu(Relu_1)] + Relu Operands: [Conv2d(simple_search_space_0.Conv2d_1)] + Conv Users: [Relu(simple_search_space_0.Relu_1)] Nodes: - Node: Relu(Relu_1) - Operands: [Conv2d(Conv2d_1)] - Node: Conv2d(Conv2d_1) - Operands: [Relu(Relu_0), Tensor(Conv2d_1.weight)] - Node: Tensor(Conv2d_1.weight) + Node: Relu(simple_search_space_0.Relu_1) + Operands: [Conv2d(simple_search_space_0.Conv2d_1)] + Node: Conv2d(simple_search_space_0.Conv2d_1) + Operands: [Relu(simple_search_space_0.Relu_0), Tensor(simple_search_space_0.Conv2d_1.weight)] + Node: Tensor(simple_search_space_0.Conv2d_1.weight) Operands: [] - Node: Relu(Relu_0) - Operands: [Conv2d(Conv2d_0)] - Node: Conv2d(Conv2d_0) - Operands: [Tensor(input), Tensor(Conv2d_0.weight)] - Node: Tensor(Conv2d_0.weight) + Node: Relu(simple_search_space_0.Relu_0) + Operands: [Conv2d(simple_search_space_0.Conv2d_0)] + Node: Conv2d(simple_search_space_0.Conv2d_0) + Operands: [Tensor(simple_search_space_0.input), Tensor(simple_search_space_0.Conv2d_0.weight)] + Node: Tensor(simple_search_space_0.Conv2d_0.weight) Operands: [] - Node: Tensor(input) + Node: Tensor(simple_search_space_0.input) Operands: [] @@ -243,23 +270,27 @@ def conv_relu(input, kernel_size, out_channels, stride): input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) +@search_space +def space(input): + kernel_size = CategoricalParameter(name="kernel_size", choices=[1, 3, 5]) + stride = CategoricalParameter(name="stride", choices=[1, 2]) + out_channels = IntScalarParameter(name="out_channels", min=8, max=64) + net = 
conv_relu(input, kernel_size=kernel_size, out_channels=out_channels, stride=stride) + net = conv_relu(net, kernel_size=kernel_size, out_channels=out_channels, stride=stride) + return net -kernel_size = CategoricalParameter(name="kernel_size", choices=[1, 3, 5]) -stride = CategoricalParameter(name="stride", choices=[1, 2]) -out_channels = IntScalarParameter(name="out_channels", min=8, max=64) -net = conv_relu(input, kernel_size=kernel_size, out_channels=out_channels, stride=stride) -net = conv_relu(net, kernel_size=kernel_size, out_channels=out_channels, stride=stride) +net = space(input) for n in get_nodes(net): print(n) ``` - Relu(Relu_1) - Conv2d(Conv2d_1) - Tensor(Conv2d_1.weight) - Relu(Relu_0) - Conv2d(Conv2d_0) - Tensor(Conv2d_0.weight) + Relu(space_0.Relu_1) + Conv2d(space_0.Conv2d_1) + Tensor(space_0.Conv2d_1.weight) + Relu(space_0.Relu_0) + Conv2d(space_0.Conv2d_0) + Tensor(space_0.Conv2d_0.weight) Tensor(input) @@ -271,9 +302,9 @@ net.parametrization(flatten=True) - {'Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = Conv2d_0.weight.kernel_size, choices = [1, 3, 5], current_value = 5), - 'Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), - 'Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = Conv2d_0.stride, choices = [1, 2], current_value = 1)} + {'space_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = space_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 1), + 'space_0.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = space_0.Conv2d_0.weight.kernel_size, _registered = True, choices = [1, 3, 5], current_value = 1), + 'space_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = 
space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8)} @@ -284,12 +315,15 @@ Note, how there is just one set of parameters. If defined this way, both blocks input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - -kernel_size = CategoricalParameter(name="kernel_size", choices=[1, 3, 5]) -stride = CategoricalParameter(name="stride", choices=[1, 2]) -out_channels = IntScalarParameter(name="out_channels", min=8, max=64) -net = conv_relu(input, kernel_size=kernel_size.new(), out_channels=out_channels.new(), stride=stride.new()) -net = conv_relu(net, kernel_size=kernel_size.new(), out_channels=out_channels.new(), stride=stride.new()) +@search_space +def space(input): + kernel_size = CategoricalParameter(name="kernel_size", choices=[1, 3, 5]) + stride = CategoricalParameter(name="stride", choices=[1, 2]) + out_channels = IntScalarParameter(name="out_channels", min=8, max=64) + net = conv_relu(input, kernel_size=kernel_size.new(), out_channels=out_channels.new(), stride=stride.new()) + net = conv_relu(net, kernel_size=kernel_size.new(), out_channels=out_channels.new(), stride=stride.new()) + return net +net = space(input) net.parametrization(flatten=True) ``` @@ -297,12 +331,12 @@ net.parametrization(flatten=True) - {'Conv2d_1.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = Conv2d_1.weight.kernel_size, choices = [1, 3, 5], current_value = 3), - 'Conv2d_1.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_1.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8), - 'Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = Conv2d_0.weight.kernel_size, choices = [1, 3, 5], current_value = 3), - 'Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = Conv2d_0.weight.out_channels, min = 8, max = 64, 
step_size = 1, current_value = 8), - 'Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = Conv2d_0.stride, choices = [1, 2], current_value = 2), - 'Conv2d_1.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = Conv2d_1.stride, choices = [1, 2], current_value = 2)} + {'space_0.Conv2d_1.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = space_0.Conv2d_1.stride, _registered = True, choices = [1, 2], current_value = 2), + 'space_0.Conv2d_1.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = space_0.Conv2d_1.weight.kernel_size, _registered = True, choices = [1, 3, 5], current_value = 5), + 'space_0.Conv2d_1.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = space_0.Conv2d_1.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8), + 'space_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = space_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 2), + 'space_0.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = space_0.Conv2d_0.weight.kernel_size, _registered = True, choices = [1, 3, 5], current_value = 5), + 'space_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = space_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8)} @@ -322,31 +356,35 @@ def block(input): input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) -net = block(input) -net = block(net) +@search_space +def space(input): + net = block(input) + net = block(net) + return net +net = space(input) for n in get_nodes(net): print(n) ``` - Relu(Relu_5) - Conv2d(Conv2d_5) - Tensor(Conv2d_5.weight) - Relu(Relu_4) - Conv2d(Conv2d_4) - Tensor(Conv2d_4.weight) - Relu(Relu_3) - Conv2d(Conv2d_3) - 
Tensor(Conv2d_3.weight) - Relu(Relu_2) - Conv2d(Conv2d_2) - Tensor(Conv2d_2.weight) - Relu(Relu_1) - Conv2d(Conv2d_1) - Tensor(Conv2d_1.weight) - Relu(Relu_0) - Conv2d(Conv2d_0) - Tensor(Conv2d_0.weight) + Relu(space_0.Relu_5) + Conv2d(space_0.Conv2d_5) + Tensor(space_0.Conv2d_5.weight) + Relu(space_0.Relu_4) + Conv2d(space_0.Conv2d_4) + Tensor(space_0.Conv2d_4.weight) + Relu(space_0.Relu_3) + Conv2d(space_0.Conv2d_3) + Tensor(space_0.Conv2d_3.weight) + Relu(space_0.Relu_2) + Conv2d(space_0.Conv2d_2) + Tensor(space_0.Conv2d_2.weight) + Relu(space_0.Relu_1) + Conv2d(space_0.Conv2d_1) + Tensor(space_0.Conv2d_1.weight) + Relu(space_0.Relu_0) + Conv2d(space_0.Conv2d_0) + Tensor(space_0.Conv2d_0.weight) Tensor(input) @@ -381,34 +419,39 @@ def block(input): net = conv_relu(net, kernel_size=kernel_size.new(), out_channels=out_channels.new(), stride=stride.new()) return net + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) -net = block(input) -net = block(net) +@search_space +def space(input): + net = block(input) + net = block(net) + return net +net = space(input) for n in get_nodes(net): print(n) ``` - Relu(block_1.conv_relu_2.Relu_0) - Conv2d(block_1.conv_relu_2.Conv2d_0) - Tensor(block_1.conv_relu_2.Conv2d_0.weight) - Relu(block_1.conv_relu_1.Relu_0) - Conv2d(block_1.conv_relu_1.Conv2d_0) - Tensor(block_1.conv_relu_1.Conv2d_0.weight) - Relu(block_1.conv_relu_0.Relu_0) - Conv2d(block_1.conv_relu_0.Conv2d_0) - Tensor(block_1.conv_relu_0.Conv2d_0.weight) - Relu(block_0.conv_relu_2.Relu_0) - Conv2d(block_0.conv_relu_2.Conv2d_0) - Tensor(block_0.conv_relu_2.Conv2d_0.weight) - Relu(block_0.conv_relu_1.Relu_0) - Conv2d(block_0.conv_relu_1.Conv2d_0) - Tensor(block_0.conv_relu_1.Conv2d_0.weight) - Relu(block_0.conv_relu_0.Relu_0) - Conv2d(block_0.conv_relu_0.Conv2d_0) - Tensor(block_0.conv_relu_0.Conv2d_0.weight) + Relu(space_0.block_1.conv_relu_2.Relu_0) + Conv2d(space_0.block_1.conv_relu_2.Conv2d_0) + 
Tensor(space_0.block_1.conv_relu_2.Conv2d_0.weight) + Relu(space_0.block_1.conv_relu_1.Relu_0) + Conv2d(space_0.block_1.conv_relu_1.Conv2d_0) + Tensor(space_0.block_1.conv_relu_1.Conv2d_0.weight) + Relu(space_0.block_1.conv_relu_0.Relu_0) + Conv2d(space_0.block_1.conv_relu_0.Conv2d_0) + Tensor(space_0.block_1.conv_relu_0.Conv2d_0.weight) + Relu(space_0.block_0.conv_relu_2.Relu_0) + Conv2d(space_0.block_0.conv_relu_2.Conv2d_0) + Tensor(space_0.block_0.conv_relu_2.Conv2d_0.weight) + Relu(space_0.block_0.conv_relu_1.Relu_0) + Conv2d(space_0.block_0.conv_relu_1.Conv2d_0) + Tensor(space_0.block_0.conv_relu_1.Conv2d_0.weight) + Relu(space_0.block_0.conv_relu_0.Relu_0) + Conv2d(space_0.block_0.conv_relu_0.Conv2d_0) + Tensor(space_0.block_0.conv_relu_0.Conv2d_0.weight) Tensor(input) @@ -433,14 +476,24 @@ def choice_block(input): net = ChoiceOp(identity, optional_conv)(input) return net - + ``` ```python +kernel_size = CategoricalParameter(name="kernel_size", choices=[1, 3, 5]) +stride = CategoricalParameter(name="stride", choices=[1, 2]) +out_channels = IntScalarParameter(name="out_channels", min=8, max=64) + + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) -conv = conv_relu(input, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) -net = choice_block(conv) + +@search_space +def space(input, out_channels, stride, kernel_size): + conv = conv_relu(input, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) + net = choice_block(conv) + return net +net = space(input, out_channels, stride, kernel_size) net.parametrization(flatten=True) @@ -449,13 +502,13 @@ net.parametrization(flatten=True) - {'choice_block_0.ChoiceOp_0.choice': IntScalarParameter(rng = Generator(PCG64), name = choice, id = choice_block_0.ChoiceOp_0.choice, min = 0, max = 1, step_size = 1, current_value = 0), - 'choice_block_0.conv_relu_1.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, 
id = choice_block_0.conv_relu_1.Conv2d_0.stride, choices = [1, 2], current_value = 2), - 'choice_block_0.conv_relu_1.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = choice_block_0.conv_relu_1.Conv2d_0.weight.kernel_size, choices = [1, 3, 5], current_value = 5), - 'choice_block_0.conv_relu_1.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = choice_block_0.conv_relu_1.Conv2d_0.weight.out_channels, min = 4, max = 64, step_size = 1, current_value = 4), - 'conv_relu_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = conv_relu_0.Conv2d_0.stride, choices = [1, 2], current_value = 2), - 'conv_relu_0.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = conv_relu_0.Conv2d_0.weight.kernel_size, choices = [1, 3, 5], current_value = 3), - 'conv_relu_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = conv_relu_0.Conv2d_0.weight.out_channels, min = 8, max = 64, step_size = 1, current_value = 8)} + {'space_0.choice_block_0.ChoiceOp_0.choice': IntScalarParameter(rng = Generator(PCG64), name = choice, id = space_0.choice_block_0.ChoiceOp_0.choice, _registered = True, min = 0, max = 1, step_size = 1, current_value = 0), + 'space_0.choice_block_0.conv_relu_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = space_0.choice_block_0.conv_relu_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 1), + 'space_0.choice_block_0.conv_relu_0.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = space_0.choice_block_0.conv_relu_0.Conv2d_0.weight.kernel_size, _registered = True, choices = [1, 3, 5], current_value = 1), + 'space_0.choice_block_0.conv_relu_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = 
space_0.choice_block_0.conv_relu_0.Conv2d_0.weight.out_channels, _registered = True, min = 4, max = 64, step_size = 1, current_value = 4), + 'space_0.conv_relu_0.Conv2d_0.stride': CategoricalParameter(rng = Generator(PCG64), name = stride, id = space_0.conv_relu_0.Conv2d_0.stride, _registered = True, choices = [1, 2], current_value = 1), + 'space_0.conv_relu_0.Conv2d_0.weight.kernel_size': CategoricalParameter(rng = Generator(PCG64), name = kernel_size, id = space_0.conv_relu_0.Conv2d_0.weight.kernel_size, _registered = True, choices = [1, 3, 5], current_value = 3), + 'space_0.conv_relu_0.Conv2d_0.weight.out_channels': IntScalarParameter(rng = Generator(PCG64), name = out_channels, id = space_0.conv_relu_0.Conv2d_0.weight.out_channels, _registered = True, min = 8, max = 64, step_size = 1, current_value = 8)} @@ -494,9 +547,12 @@ from hannah.nas.functional_operators.executor import BasicExecutor input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) -net = block(input) -net = block(net) - +@search_space +def space(input): + net = block(input) + net = block(net) + return net +net = space(input) model = BasicExecutor(net) model.initialize() @@ -507,44 +563,49 @@ model.forward(x) - tensor([[[[0.0000, 0.0000, 0.0000, 0.0000], + tensor([[[[0.2717, 0.0092, 0.1203, 0.1979], + [0.0000, 0.2005, 0.0972, 0.0256], + [0.1351, 0.1363, 0.0754, 0.1609], + [0.0000, 0.1031, 0.0446, 0.2227]], + + [[0.2462, 0.0013, 0.0224, 0.0534], + [0.2030, 0.1310, 0.0000, 0.0404], + [0.1303, 0.1276, 0.0634, 0.1498], + [0.1786, 0.0298, 0.0085, 0.1301]], + + [[0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 0.0000]], - [[0.0255, 0.0000, 0.0000, 0.0000], - [0.0000, 0.0152, 0.0000, 0.0000], - [0.0000, 0.0000, 0.0898, 0.0000], - [0.1132, 0.0894, 0.0094, 0.0138]], - - [[0.0000, 0.0000, 0.0365, 0.0000], - [0.0000, 0.1532, 0.0000, 0.2529], - [0.0000, 0.0859, 0.0396, 0.0000], - [0.0000, 0.2311, 0.0757, 
0.0000]], - - [[0.0000, 0.1285, 0.1754, 0.0000], - [0.1788, 0.1729, 0.1973, 0.1036], - [0.1823, 0.2994, 0.2293, 0.2580], - [0.0554, 0.2454, 0.1355, 0.3018]], + [[0.0000, 0.0021, 0.0000, 0.0000], + [0.0000, 0.0000, 0.0232, 0.0000], + [0.0000, 0.0000, 0.0000, 0.0000], + [0.0000, 0.0000, 0.0011, 0.0000]], - [[0.0000, 0.0234, 0.0000, 0.0000], - [0.0725, 0.0212, 0.0615, 0.0960], - [0.1040, 0.0960, 0.1613, 0.0927], - [0.1025, 0.0846, 0.0000, 0.0424]], + [[0.7481, 0.0018, 0.2029, 0.1693], + [0.7117, 0.3248, 0.1578, 0.1085], + [0.3086, 0.3926, 0.1606, 0.3065], + [0.5410, 0.1157, 0.0583, 0.4534]], - [[0.0000, 0.0000, 0.0672, 0.0818], - [0.0000, 0.1420, 0.0404, 0.0326], - [0.0000, 0.0000, 0.0000, 0.1140], - [0.0000, 0.1518, 0.1521, 0.2088]], + [[0.0000, 0.0000, 0.0705, 0.0628], + [0.0000, 0.0000, 0.1682, 0.0000], + [0.0000, 0.0000, 0.0000, 0.0000], + [0.0000, 0.0381, 0.0255, 0.0000]], - [[0.0000, 0.0995, 0.1362, 0.0000], - [0.0000, 0.1206, 0.0000, 0.0000], - [0.0000, 0.1001, 0.0000, 0.0000], - [0.0000, 0.0000, 0.0000, 0.0435]], + [[0.7549, 0.0092, 0.2340, 0.1351], + [0.7965, 0.1582, 0.2039, 0.0925], + [0.2619, 0.3976, 0.1461, 0.1876], + [0.5799, 0.0848, 0.0732, 0.4952]], - [[0.0000, 0.0000, 0.0000, 0.0245], - [0.0000, 0.0938, 0.0000, 0.0763], - [0.0000, 0.0000, 0.0000, 0.0000], - [0.0000, 0.0000, 0.0000, 0.0000]]]], grad_fn=) + [[0.5984, 0.0043, 0.2075, 0.1700], + [0.5905, 0.1869, 0.2142, 0.0772], + [0.2146, 0.3152, 0.1176, 0.1768], + [0.4285, 0.1043, 0.0665, 0.3872]]]], grad_fn=) + + +```python + +``` diff --git a/hannah/conf/model/embedded_vision_net.yaml b/hannah/conf/model/embedded_vision_net.yaml index 39978a95..da3d5771 100644 --- a/hannah/conf/model/embedded_vision_net.yaml +++ b/hannah/conf/model/embedded_vision_net.yaml @@ -1,4 +1,4 @@ -_target_: hannah.models.embedded_vision_net.models.search_space +_target_: hannah.models.embedded_vision_net.models.embedded_vision_net name: embedded_vision_net num_classes: 10 max_channels: 256 diff --git 
a/hannah/models/embedded_vision_net/models.py b/hannah/models/embedded_vision_net/models.py index 3461adbf..5f2e17fd 100644 --- a/hannah/models/embedded_vision_net/models.py +++ b/hannah/models/embedded_vision_net/models.py @@ -50,7 +50,7 @@ from hannah.nas.expressions.types import Int from hannah.nas.expressions.utils import extract_parameter_from_expression from hannah.nas.functional_operators.executor import BasicExecutor -from hannah.nas.functional_operators.op import Tensor, get_nodes, scope +from hannah.nas.functional_operators.op import Tensor, get_nodes, scope, search_space from hannah.nas.functional_operators.operators import Conv2d # from hannah.nas.functional_operators.visualizer import Visualizer @@ -106,7 +106,8 @@ def backbone(input, num_classes=10, max_channels=512, max_blocks=9): return out -def search_space( +@search_space +def embedded_vision_net( name, input, num_classes: int, diff --git a/hannah/nas/constraints/random_walk.py b/hannah/nas/constraints/random_walk.py index 99f47b4a..4388bd4a 100644 --- a/hannah/nas/constraints/random_walk.py +++ b/hannah/nas/constraints/random_walk.py @@ -25,6 +25,8 @@ import numpy as np from hannah.nas.functional_operators.lazy import lazy +from hannah.nas.functional_operators.op import ChoiceOp +from hannah.nas.parameters.parameters import Parameter from hannah.nas.parameters.parametrize import set_parametrization from hannah.nas.search.utils import np_to_primitive @@ -70,32 +72,36 @@ def hierarchical_parameter_dict(parameter, include_empty=False, flatten=False): } -def get_active_parameter(params): - # FIXME: this needs to be generalized - active_params = {} - params = hierarchical_parameter_dict(params) - num_blocks = params["ChoiceOp_0"]["num_blocks"][""].value - active_params["num_blocks"] = num_blocks + 1 - for i in range(num_blocks + 1): - current_block = f"block_{i}" - depth = params[current_block]["ChoiceOp_0"]["depth"].value - active_params[params[current_block]["ChoiceOp_0"]["depth"].name] = depth + 1 
- for j in range(depth + 1): - current_pattern = f"pattern_{j}" - choice = params[current_block][current_pattern]["ChoiceOp_0.choice"].value - for k, v in params[current_block][current_pattern].items(): - if k.split(".")[0] == "Conv2d_0": - active_params[v.name] = v.value - elif "expand_reduce" in k and choice == 1: - active_params[v.name] = v.value - elif "reduce_expand" in k and choice == 2: - active_params[v.name] = v.value - elif "pooling" in k and choice == 3: - active_params[v.name] = v.value - elif "ChoiceOp" in k: - active_params[v.name] = CHOICES[v.value] - - return active_params +def get_active_parameter(net): + active_param_ids = [] + queue = [net] + visited = [net.id] + + def extract_parameters(node): + ids = [] + for k, p in node._PARAMETERS.items(): + if isinstance(p, Parameter): + ids.append(p.id) + return ids + + while queue: + current = queue.pop() + if isinstance(current, ChoiceOp): + # handle choices + active_param_ids.append(current.switch.id) + chosen_path = current.options[lazy(current.switch)] + if chosen_path.id not in visited: + queue.append(chosen_path) + visited.append(chosen_path.id) + else: + # handle all other operators & tensors + active_param_ids.extend(extract_parameters(current)) + for operand in current.operands: + if operand.id not in visited: + queue.append(operand) + visited.append(operand.id) + + return active_param_ids class RandomWalkConstraintSolver: @@ -176,7 +182,8 @@ def solve(self, module, parameters, fix_vars=[]): ct = 0 while ct < self.max_iterations: - active_params = get_active_parameter(params) + # active_params = get_active_parameter(params) + active_params = get_active_parameter(mod) param_keys = [p for p in all_param_keys if p in active_params] current = con.lhs.evaluate() diff --git a/hannah/nas/functional_operators/op.py b/hannah/nas/functional_operators/op.py index 489ed80f..59cb4397 100644 --- a/hannah/nas/functional_operators/op.py +++ b/hannah/nas/functional_operators/op.py @@ -75,49 +75,55 @@ def 
get_unique_id(): return _id -_id = 0 - - -def get_unique_id(): - global _id - _id += 1 - return _id - +def get_highest_scope_counter(scope, scope_dict): + if scope in scope_dict: + scope_dict[scope] += 1 + else: + scope_dict[scope] = 0 + return scope_dict[scope] -# FIXME: Traverses nodes to often -> massively increases time when building -# search spaces -def get_highest_scope_counter(start_nodes, scope): - ct = -1 - for start_node in start_nodes: - for n in get_nodes(start_node): - highest_scope = n.id.split(".")[0] - if scope == "_".join(highest_scope.split("_")[:-1]): - ct = max(int(highest_scope.split("_")[-1]), ct) - return ct - -# TODO: Make scopes accessible, e.g., as a list def scope(function): + """Decorator defining a scope in a search space. The id of every subcomponent (operators or lower-hierarchy scopes) + enclosed in a function decorated with this will be prefixed with the name of the function, creating a + hierarchical scope. + """ @wraps(function) def set_scope(*args, **kwargs): - out = function(*args, **kwargs) - name = function.__name__ + assert "global_scope_stack" in globals(), "No scope tracking found, did you wrap the search space with @search_space?" + inputs = [a for a in args if isinstance(a, (Op, Tensor))] + [ a for k, a in kwargs.items() if isinstance(a, (Op, Tensor)) ] - ct = get_highest_scope_counter(inputs, name) + 1 + name = function.__name__ + global global_scope_stack + ct = get_highest_scope_counter(name, global_scope_stack[-1]) + global_scope_stack.append({}) + out = function(*args, **kwargs) for n in nodes_in_scope(out, inputs): n.setid(f"{name}_{ct}.{n.id}") - # n.id = f"{name}_{ct}.{n.id}" - # print(n.id) - # for k, p in n._PARAMETERS.items(): - # if isinstance(p, Expression): - # p.id = f"{name}.{k}" + global_scope_stack.pop() return out return set_scope +def search_space(function): + """Decorator to define a search space. 
For correct scoping, + a search space containing functional ops must be enclosed by + a function decorated with @search_space. + """ + @wraps(function) + def search_space_limits(*args, **kwargs): + global global_scope_stack + global_scope_stack = [{}] + out = scope(function)(*args, **kwargs) + del global_scope_stack + return out + + return search_space_limits + + @parametrize class Op: def __init__(self, name, *args, **kwargs) -> None: @@ -133,10 +139,14 @@ def __call__(self, *operands) -> Any: new_op = self # FIXME: Just use self? for operand in operands: operand.connect(new_op) - ct = get_highest_scope_counter(operands, self.name) + 1 + # Some Ops (ChoiceOp) can be called multiple times and already have a counter if not len(self.id.split(".")[-1].split("_")) > 1: + assert "global_scope_stack" in globals(), "No scope tracking found, did you wrap the search space with @search_space?" + global global_scope_stack + ct = get_highest_scope_counter(self.name, global_scope_stack[-1]) self.id = f"{self.id}_{ct}" + # self.setid(f"{self.id}_{ct}") return new_op def connect(self, node): @@ -318,7 +328,9 @@ def _connect_options(self, *operands): self.options[i] = node_opt(*operands) if is_parametrized(self.options[i]): self._PARAMETERS[self.options[i].id] = self.options[i] # FIXME: - ct = get_highest_scope_counter(operands, self.name) + 1 + assert "global_scope_stack" in globals(), "No scope tracking found, did you wrap the search space with @search_space?" + global global_scope_stack + ct = get_highest_scope_counter(self.name, global_scope_stack[-1]) self.id = f"{self.id}_{ct}" def shape_fun(self): diff --git a/hannah/nas/performance_prediction/protocol.py b/hannah/nas/performance_prediction/protocol.py index f775372f..fa658d67 100644 --- a/hannah/nas/performance_prediction/protocol.py +++ b/hannah/nas/performance_prediction/protocol.py @@ -53,7 +53,6 @@ def predict( ... 
-@runtime_checkable class FitablePredictor(Predictor): def load(self, result_folder: str): """Load predefined model from a folder. diff --git a/hannah/nas/test/test_functional_executor.py b/hannah/nas/test/test_functional_executor.py index 16154fad..9ad2bfa7 100644 --- a/hannah/nas/test/test_functional_executor.py +++ b/hannah/nas/test/test_functional_executor.py @@ -1,7 +1,7 @@ from hannah.nas.functional_operators.executor import BasicExecutor from hannah.nas.functional_operators.lazy import lazy from hannah.nas.functional_operators.operators import Conv2d, Linear, Relu -from hannah.nas.functional_operators.op import Tensor +from hannah.nas.functional_operators.op import Tensor, search_space from hannah.nas.parameters.parameters import CategoricalParameter, IntScalarParameter from torch.optim import SGD @@ -42,6 +42,7 @@ def linear(input, out_features): return out +@search_space def network(input): out = conv_relu(input, out_channels=IntScalarParameter(32, 64, name='out_channels'), diff --git a/hannah/nas/test/test_functional_ops.py b/hannah/nas/test/test_functional_ops.py index d94f5b60..f4bb300c 100644 --- a/hannah/nas/test/test_functional_ops.py +++ b/hannah/nas/test/test_functional_ops.py @@ -1,6 +1,6 @@ from hannah.nas.expressions.arithmetic import Ceil from hannah.nas.expressions.types import Int -from hannah.nas.functional_operators.op import ChoiceOp, OptionalOp, Tensor, scope, nodes_in_scope +from hannah.nas.functional_operators.op import ChoiceOp, OptionalOp, Tensor, scope, nodes_in_scope, search_space from hannah.nas.functional_operators.operators import Add, Conv2d, Relu, Identity from hannah.nas.parameters.parameters import CategoricalParameter, IntScalarParameter from functools import partial @@ -66,58 +66,70 @@ def res_block(input): def test_functional_ops(): - kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') - out_channels = IntScalarParameter(min=4, max=64, name='out_channels') - stride = CategoricalParameter([1, 2], 
name='stride') + @search_space + def test_space(): + kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') + out_channels = IntScalarParameter(min=4, max=64, name='out_channels') + stride = CategoricalParameter([1, 2], name='stride') - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - weight = Tensor(name='weight', - shape=(out_channels, 3, kernel_size, kernel_size), - axis=('O', 'I', 'kH', 'kW')) - net = Conv2d(stride=stride)(input, weight) - net = Relu()(net) + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + weight = Tensor(name='weight', + shape=(out_channels, 3, kernel_size, kernel_size), + axis=('O', 'I', 'kH', 'kW')) + net = Conv2d(stride=stride)(input, weight) + net = Relu()(net) + return net + net = test_space() def test_functional_ops_chained(): - kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') - out_channels = IntScalarParameter(min=4, max=64, name='out_channels') - stride = CategoricalParameter([1, 2], name='stride') - - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - weight0 = Tensor(name='weight', - shape=(out_channels, 3, kernel_size, kernel_size), - axis=('O', 'I', 'kH', 'kW')) - net = Conv2d(stride=stride)(input, weight0) - net = Relu()(net) - - weight1 = Tensor(name='weight', - shape=(out_channels, 3, kernel_size, kernel_size), - axis=('O', 'I', 'kH', 'kW')) + @search_space + def test_space(): + kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') + out_channels = IntScalarParameter(min=4, max=64, name='out_channels') + stride = CategoricalParameter([1, 2], name='stride') - net = Conv2d(stride=stride.new())(net, weight1) - net = Relu()(net) + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + weight0 = Tensor(name='weight', + shape=(out_channels, 3, kernel_size, kernel_size), + axis=('O', 'I', 'kH', 'kW')) + net = Conv2d(stride=stride)(input, weight0) + net = Relu()(net) + + weight1 = 
Tensor(name='weight', + shape=(out_channels, 3, kernel_size, kernel_size), + axis=('O', 'I', 'kH', 'kW')) + + net = Conv2d(stride=stride.new())(net, weight1) + net = Relu()(net) + return net + net = test_space() # print(net.parametrization(flatten=True)) def test_shape_propagation(): - kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') - out_channels = IntScalarParameter(min=4, max=64, name='out_channels') - stride = CategoricalParameter([1, 2], name='stride') - stride.current_value = 1 + @search_space + def test_space(): + kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') + out_channels = IntScalarParameter(min=4, max=64, name='out_channels') + stride = CategoricalParameter([1, 2], name='stride') + stride.current_value = 1 - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - net = conv2d(input, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) - net = relu(net) + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + net = conv2d(input, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) + net = relu(net) - net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) - net = relu(net) + net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) + net = relu(net) - stride.current_value = 2 - out_channels.current_value = 48 + stride.current_value = 2 + out_channels.current_value = 48 - net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) - net = relu(net) + net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) + net = relu(net) + return net + net = test_space() shape = net.shape() assert shape[0] == 1 @@ -127,13 +139,17 @@ def test_shape_propagation(): def test_blocks(): - kernel_size = CategoricalParameter([1, 3, 5], 
name='kernel_size') - out_channels = IntScalarParameter(min=4, max=64, name='out_channels') + @search_space + def test_space(): + kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') + out_channels = IntScalarParameter(min=4, max=64, name='out_channels') - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - net = conv_relu(input, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=1) + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + net = conv_relu(input, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=1) - net = conv_relu(net, out_channels=32, kernel_size=kernel_size.new(), stride=2) + net = conv_relu(net, out_channels=32, kernel_size=kernel_size.new(), stride=2) + return net + net = test_space() def test_operators(): @@ -142,29 +158,39 @@ def test_operators(): def test_multibranches(): - kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') - out_channels = IntScalarParameter(min=4, max=64, name='out_channels') + @search_space + def test_space(): + kernel_size = CategoricalParameter([1, 3, 5], name='kernel_size') + out_channels = IntScalarParameter(min=4, max=64, name='out_channels') - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - net = conv_relu(input, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=2) + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + net = conv_relu(input, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=2) - stride = CategoricalParameter([1, 2], name='stride') - stride.current_value = 1 - block_channels = out_channels.new() - block_channels.current_value = 48 - main_branch = conv_relu(net, out_channels=block_channels, kernel_size=kernel_size, stride=stride) - res = residual(net, out_channels=block_channels, in_size=net.shape()[2], out_size=main_branch.shape()[2]) - net = add(main_branch, res) - 
stride.current_value = 2 + stride = CategoricalParameter([1, 2], name='stride') + stride.current_value = 1 + block_channels = out_channels.new() + block_channels.current_value = 48 + main_branch = conv_relu(net, out_channels=block_channels, kernel_size=kernel_size, stride=stride) + res = residual(net, out_channels=block_channels, in_size=net.shape()[2], out_size=main_branch.shape()[2]) + net = add(main_branch, res) + stride.current_value = 2 + return net + net = test_space() def test_scoping(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - net = double_conv(input) - net = double_conv(net) - net = double_conv(net) - test_scope = 'double_conv_1.Conv2d_0' + @search_space + def test_space(input): + net = double_conv(input) + net = double_conv(net) + net = double_conv(net) + return net + net = test_space(input) + + test_scope = 'test_space_0.double_conv_1.Conv2d_0' scopes = [] for node in nodes_in_scope(net, [input]): scopes.append(node.id) @@ -172,8 +198,9 @@ def test_scoping(): def test_parametrization(): - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - net = res_block(input) + # input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + # net = res_block(input) + pass def test_choice(): @@ -192,11 +219,15 @@ def choice_block(): net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) return net - net = choice_block() - assert 'choice_block_0.ChoiceOp_0.choice' in net.parametrization(flatten=True) - net.parametrization(flatten=True)['choice_block_0.ChoiceOp_0.choice'].set_current(0) - net.parametrization(flatten=True)['choice_block_0.ChoiceOp_0.choice'].set_current(1) - net.parametrization(flatten=True)['choice_block_0.ChoiceOp_0.choice'].set_current(2) + @search_space + def test_space(): + return choice_block() + + net = test_space() + assert 'test_space_0.choice_block_0.ChoiceOp_0.choice' in net.parametrization(flatten=True) + 
net.parametrization(flatten=True)['test_space_0.choice_block_0.ChoiceOp_0.choice'].set_current(0) + net.parametrization(flatten=True)['test_space_0.choice_block_0.ChoiceOp_0.choice'].set_current(1) + net.parametrization(flatten=True)['test_space_0.choice_block_0.ChoiceOp_0.choice'].set_current(2) def test_optional_op(): @@ -212,7 +243,11 @@ def optional_block(): net = conv2d(net, out_channels=out_channels.new(), stride=stride.new(), kernel_size=kernel_size.new()) return net - net = optional_block() + @search_space + def test_space(): + return optional_block() + + net = test_space() def test_dynamic_depth(): @@ -230,24 +265,29 @@ def dynamic_depth_block(input, depth): return net - input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) - depth_choice = IntScalarParameter(min=0, max=2, name="depth_choice") - depth_choice.current_value = 2 + @search_space + def test_space(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + depth_choice = IntScalarParameter(min=0, max=2, name="depth_choice") + depth_choice.current_value = 2 + + net = dynamic_depth_block(input, depth_choice) + return net - net = dynamic_depth_block(input, depth_choice) + net = test_space() # net = conv_relu(net, 32, 1, 1) if __name__ == '__main__': - # test_functional_ops() + test_functional_ops() test_functional_ops_chained() - # test_shape_propagation() - # test_blocks() - # test_operators() - # test_multibranches() - # test_scoping() + test_shape_propagation() + test_blocks() + test_operators() + test_multibranches() + test_scoping() # test_parametrization() - # test_choice() - # test_optional_op() - # test_dynamic_depth() + test_choice() + test_optional_op() + test_dynamic_depth() # test_visualization() diff --git a/hannah/nas/test/test_nas_graph_dataset_for_predictor.py b/hannah/nas/test/test_nas_graph_dataset_for_predictor.py new file mode 100644 index 00000000..dee84e5b --- /dev/null +++ 
b/hannah/nas/test/test_nas_graph_dataset_for_predictor.py @@ -0,0 +1,31 @@ +import torch +from hannah.nas.functional_operators.executor import BasicExecutor +from hannah.nas.functional_operators.op import Tensor +from hannah.nas.graph_conversion import model_to_graph +from hannah.nas.performance_prediction.features.dataset import OnlineNASGraphDataset, get_features, to_dgl_graph +from hannah.models.embedded_vision_net.models import embedded_vision_net, search_space + + +def test_online_dataset(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) + space = embedded_vision_net("test", input, 10, 128) + space.sample() + model = BasicExecutor(space) + model.initialize() + + x = torch.ones(input.shape()) + nx_graph = model_to_graph(model, x) + fea = get_features(nx_graph) + for i, n in enumerate(nx_graph.nodes): + nx_graph.nodes[n]['features'] = fea.iloc[i].to_numpy() + dgl_graph = to_dgl_graph(nx_graph) + + graphs = [dgl_graph] + labels = [1.0] + + dataset = OnlineNASGraphDataset(graphs, labels) + print() + + +if __name__ == '__main__': + test_online_dataset() \ No newline at end of file diff --git a/hannah/nas/test/test_onnx_export.py b/hannah/nas/test/test_onnx_export.py index c2c7d2c2..c3e8d9f3 100644 --- a/hannah/nas/test/test_onnx_export.py +++ b/hannah/nas/test/test_onnx_export.py @@ -21,7 +21,7 @@ import onnx -from hannah.models.embedded_vision_net.models import search_space +from hannah.models.embedded_vision_net.models import embedded_vision_net, search_space from hannah.nas.constraints.random_walk import RandomWalkConstraintSolver from hannah.nas.export import to_onnx from hannah.nas.functional_operators.op import ChoiceOp, Tensor, scope @@ -31,7 +31,7 @@ from hannah.nas.search.sampler.random_sampler import RandomSampler -@scope +@search_space def conv3x3_relu(input): out_channels = 48 weight = Tensor( @@ -45,7 +45,7 @@ def conv3x3_relu(input): return relu -@scope +@search_space def op_choice(input): conv = Conv2d()( input, @@ 
-100,7 +100,7 @@ def test_export_embedded_vision_net(): print("Init search space") input = Tensor("input", (1, 3, 32, 32), axis=["N", "C", "H", "W"], grad=False) - graph = search_space("test", input, num_classes=10, max_blocks=2) + graph = embedded_vision_net("test", input, num_classes=10, max_blocks=2) print(graph) print("Init sampler") @@ -127,3 +127,10 @@ def test_export_embedded_vision_net(): print("") print("Done") + + +if __name__ == "__main__": + test_deepcopy() + test_export_choice() + test_export_conv2d() + test_export_embedded_vision_net() diff --git a/hannah/nas/test/test_operators.py b/hannah/nas/test/test_operators.py index c5365812..85049fe0 100644 --- a/hannah/nas/test/test_operators.py +++ b/hannah/nas/test/test_operators.py @@ -1,6 +1,6 @@ from hannah.models.embedded_vision_net.blocks import grouped_pointwise from hannah.nas.functional_operators.executor import BasicExecutor -from hannah.nas.functional_operators.op import Tensor +from hannah.nas.functional_operators.op import Tensor, search_space from torch.testing import assert_close import torch.nn as nn import torch @@ -25,8 +25,12 @@ def forward(self, x): out = torch.add(out_0, out) return out + @search_space + def pw_space(input, out_channels): + return grouped_pointwise(input, out_channels) + input = Tensor(name="input", shape=(1, 64, 32, 32), axis=("N", "C", "H", "W")) - grouped_pw = grouped_pointwise(input, out_channels=128) + grouped_pw = pw_space(input, out_channels=128) model = BasicExecutor(grouped_pw) model.initialize() @@ -37,8 +41,8 @@ def forward(self, x): params = dict(model.named_parameters()) with torch.no_grad(): - torch_mod.pw_k.weight = params["grouped_pointwise_0_Conv2d_0_weight"] - torch_mod.pw_l.weight = params["grouped_pointwise_0_Conv2d_1_weight"] + torch_mod.pw_k.weight = params["pw_space_0_grouped_pointwise_0_Conv2d_0_weight"] + torch_mod.pw_l.weight = params["pw_space_0_grouped_pointwise_0_Conv2d_1_weight"] torch_out = torch_mod(x) diff --git 
a/hannah/nas/test/test_parameter_scopes.py b/hannah/nas/test/test_parameter_scopes.py new file mode 100644 index 00000000..14e5c504 --- /dev/null +++ b/hannah/nas/test/test_parameter_scopes.py @@ -0,0 +1,34 @@ +from hannah.nas.functional_operators.operators import Relu, Conv2d +from hannah.nas.functional_operators.op import scope, search_space, Tensor +from hannah.nas.parameters.parameters import IntScalarParameter, CategoricalParameter + + +@search_space +def simple_search_space(): + input = Tensor(name='input', + shape=(1, 3, 32, 32), + axis=('N', 'C', 'H', 'W')) + + weight_0 = Tensor(name='weight', shape=(IntScalarParameter(min=8, max=64, name='out_channels'), 3, 1, 1), axis=("O", "I", "kH", "kW")) + + conv_0 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(input, weight_0) + relu_0 = Relu()(conv_0) + + weight_1 = Tensor(name='weight', shape=(IntScalarParameter(min=32, max=64, name='out_channels'), conv_0.shape()[1], 3, 3), axis=("O", "I", "kH", "kW")) + conv_1 = Conv2d(stride=CategoricalParameter(name='stride', choices=[1, 2]))(relu_0, weight_1) + relu_1 = Relu()(conv_1) + return relu_1 + + +def test_parameter_scoping(): + out = simple_search_space() + params = out.parametrization() + assert len(params) == 4 + assert 'simple_search_space_0.Conv2d_1.stride' in params + assert 'simple_search_space_0.Conv2d_1.weight.out_channels' in params + assert 'simple_search_space_0.Conv2d_0.stride' in params + assert 'simple_search_space_0.Conv2d_0.weight.out_channels' in params + + +if __name__ == "__main__": + test_parameter_scoping() \ No newline at end of file diff --git a/hannah/nas/test/test_random_walk_constrainer.py b/hannah/nas/test/test_random_walk_constrainer.py new file mode 100644 index 00000000..2dad392b --- /dev/null +++ b/hannah/nas/test/test_random_walk_constrainer.py @@ -0,0 +1,63 @@ +from hannah.nas.functional_operators.op import Tensor, scope, search_space +from hannah.nas.constraints.random_walk import get_active_parameter +from 
hannah.models.embedded_vision_net.operators import adaptive_avg_pooling, add, conv_relu, dynamic_depth, linear +from hannah.nas.parameters.parameters import CategoricalParameter, IntScalarParameter + + +@scope +def conv_block(input, stride, kernel_size, out_channels): + out = conv_relu(input, stride=stride, kernel_size=kernel_size, out_channels=out_channels) + out = conv_relu(out, stride=1, kernel_size=kernel_size, out_channels=out_channels) + return out + + +@scope +def parallel_blocks(input, stride, kernel_size, out_channels): + out_0 = conv_block(input, stride=stride, kernel_size=kernel_size, out_channels=out_channels) + out_1 = conv_block(input, stride=stride, kernel_size=kernel_size, out_channels=out_channels) + out = add(out_0, out_1) + return out + + +@scope +def classifier_head(input, num_classes): + out = adaptive_avg_pooling(input) + out = linear(out, num_classes) + return out + + +@search_space +def space(input): + out_channels = IntScalarParameter(4, 64, 4, name="out_channels") + kernel_size = CategoricalParameter([1, 3, 5, 7], name="kernel_size") + stride = IntScalarParameter(1, 2, name="stride") + depth = IntScalarParameter(0, 2, name="depth") + out = conv_relu(input, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=stride.new()) + block_0 = parallel_blocks(out, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=stride.new()) + block_1 = parallel_blocks(block_0, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=stride.new()) + block_2 = parallel_blocks(block_1, out_channels=out_channels.new(), kernel_size=kernel_size.new(), stride=stride.new()) + out = dynamic_depth(block_0, block_1, block_2, switch=depth) + out = classifier_head(out, num_classes=10) + return out + + +def test_get_active_params(): + input = Tensor(name='input', shape=(1, 3, 32, 32), axis=('N', 'C', 'H', 'W')) + out = space(input) + active_params = get_active_parameter(out) + assert len(active_params) == 7 + for p in 
active_params: + assert "parallel_blocks_1" not in p and "parallel_blocks_2" not in p + out.parametrization()['space_0.ChoiceOp_0.depth'].set_current(1) + active_params = get_active_parameter(out) + assert len(active_params) == 10 + for p in active_params: + assert "parallel_blocks_2" not in p + out.parametrization()['space_0.ChoiceOp_0.depth'].set_current(2) + active_params = get_active_parameter(out) + assert len(active_params) == 13 + + +if __name__ == '__main__': + test_get_active_params() + diff --git a/poetry.lock b/poetry.lock index a81c3918..1c8878bb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "absl-py" @@ -182,6 +182,35 @@ files = [ {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"}, ] +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "asttokens" +version = "2.4.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, + {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, +] + +[package.dependencies] +six = ">=1.12.0" + +[package.extras] +astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] +test = ["astroid (>=1,<2)", "astroid (>=2,<4)", 
"pytest"] + [[package]] name = "async-timeout" version = "4.0.3" @@ -626,6 +655,23 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} [package.extras] development = ["black", "flake8", "mypy", "pytest", "types-colorama"] +[[package]] +name = "comm" +version = "0.2.2" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." +optional = false +python-versions = ">=3.8" +files = [ + {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, + {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, +] + +[package.dependencies] +traitlets = ">=4" + +[package.extras] +test = ["pytest"] + [[package]] name = "contourpy" version = "1.2.1" @@ -807,6 +853,48 @@ files = [ marshmallow = ">=3.18.0,<4.0.0" typing-inspect = ">=0.4.0,<1" +[[package]] +name = "debugpy" +version = "1.8.1" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3bda0f1e943d386cc7a0e71bfa59f4137909e2ed947fb3946c506e113000f741"}, + {file = "debugpy-1.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda73bf69ea479c8577a0448f8c707691152e6c4de7f0c4dec5a4bc11dee516e"}, + {file = "debugpy-1.8.1-cp310-cp310-win32.whl", hash = "sha256:3a79c6f62adef994b2dbe9fc2cc9cc3864a23575b6e387339ab739873bea53d0"}, + {file = "debugpy-1.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eb7bd2b56ea3bedb009616d9e2f64aab8fc7000d481faec3cd26c98a964bcdd"}, + {file = "debugpy-1.8.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:016a9fcfc2c6b57f939673c874310d8581d51a0fe0858e7fac4e240c5eb743cb"}, + {file = "debugpy-1.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd97ed11a4c7f6d042d320ce03d83b20c3fb40da892f994bc041bbc415d7a099"}, + {file = 
"debugpy-1.8.1-cp311-cp311-win32.whl", hash = "sha256:0de56aba8249c28a300bdb0672a9b94785074eb82eb672db66c8144fff673146"}, + {file = "debugpy-1.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1a9fe0829c2b854757b4fd0a338d93bc17249a3bf69ecf765c61d4c522bb92a8"}, + {file = "debugpy-1.8.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3ebb70ba1a6524d19fa7bb122f44b74170c447d5746a503e36adc244a20ac539"}, + {file = "debugpy-1.8.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2e658a9630f27534e63922ebf655a6ab60c370f4d2fc5c02a5b19baf4410ace"}, + {file = "debugpy-1.8.1-cp312-cp312-win32.whl", hash = "sha256:caad2846e21188797a1f17fc09c31b84c7c3c23baf2516fed5b40b378515bbf0"}, + {file = "debugpy-1.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:edcc9f58ec0fd121a25bc950d4578df47428d72e1a0d66c07403b04eb93bcf98"}, + {file = "debugpy-1.8.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7a3afa222f6fd3d9dfecd52729bc2e12c93e22a7491405a0ecbf9e1d32d45b39"}, + {file = "debugpy-1.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d915a18f0597ef685e88bb35e5d7ab968964b7befefe1aaea1eb5b2640b586c7"}, + {file = "debugpy-1.8.1-cp38-cp38-win32.whl", hash = "sha256:92116039b5500633cc8d44ecc187abe2dfa9b90f7a82bbf81d079fcdd506bae9"}, + {file = "debugpy-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e38beb7992b5afd9d5244e96ad5fa9135e94993b0c551ceebf3fe1a5d9beb234"}, + {file = "debugpy-1.8.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:bfb20cb57486c8e4793d41996652e5a6a885b4d9175dd369045dad59eaacea42"}, + {file = "debugpy-1.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd3fdd3f67a7e576dd869c184c5dd71d9aaa36ded271939da352880c012e703"}, + {file = "debugpy-1.8.1-cp39-cp39-win32.whl", hash = "sha256:58911e8521ca0c785ac7a0539f1e77e0ce2df753f786188f382229278b4cdf23"}, + {file = "debugpy-1.8.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:6df9aa9599eb05ca179fb0b810282255202a66835c6efb1d112d21ecb830ddd3"}, + {file = "debugpy-1.8.1-py2.py3-none-any.whl", hash = "sha256:28acbe2241222b87e255260c76741e1fbf04fdc3b6d094fcf57b6c6f75ce1242"}, + {file = "debugpy-1.8.1.zip", hash = "sha256:f696d6be15be87aef621917585f9bb94b1dc9e8aced570db1b8a6fc14e8f9b42"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + [[package]] name = "dgl" version = "1.1.3" @@ -883,6 +971,20 @@ files = [ [package.extras] testing = ["hatch", "pre-commit", "pytest", "tox"] +[[package]] +name = "executing" +version = "2.0.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.5" +files = [ + {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, + {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + [[package]] name = "fairscale" version = "0.4.13" @@ -1641,6 +1743,76 @@ files = [ [package.dependencies] sortedcontainers = ">=2.0,<3.0" +[[package]] +name = "ipykernel" +version = "6.29.4" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, + {file = "ipykernel-6.29.4.tar.gz", hash = 
"sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "8.18.1" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.9" +files = [ + {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"}, + {file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" +typing-extensions = {version = "*", markers = "python_version < \"3.10\""} + +[package.extras] +all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest 
(<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"] + [[package]] name = "isort" version = "5.13.2" @@ -1655,6 +1827,25 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] +[[package]] +name = "jedi" +version = "0.19.1" +description = "An autocompletion tool for Python that can be used for text editors." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, + {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, +] + +[package.dependencies] +parso = ">=0.8.3,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + [[package]] name = "jinja2" version = "3.1.4" @@ -1683,6 +1874,49 @@ files = [ {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, ] +[[package]] +name = "jupyter-client" +version = "8.6.2" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, + {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = 
">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.7.2" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, + {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] + [[package]] name = "kiwisolver" version = "1.4.5" @@ -2181,6 +2415,20 @@ pillow = ">=8" pyparsing = ">=2.3.1" python-dateutil = ">=2.7" +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, +] + +[package.dependencies] +traitlets = "*" + [[package]] name = "mergedeep" version = "1.3.4" @@ -2502,6 +2750,17 @@ files = [ {file = 
"mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + [[package]] name = "networkx" version = "3.2.1" @@ -3039,6 +3298,21 @@ pytz = ">=2020.1" [package.extras] test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] +[[package]] +name = "parso" +version = "0.8.4" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + [[package]] name = "pathspec" version = "0.12.1" @@ -3061,6 +3335,20 @@ files = [ {file = "pbr-6.0.0.tar.gz", hash = "sha256:d1377122a5a00e2f940ee482999518efe16d745d423a670c27773dfbc3c9a7d9"}, ] +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + [[package]] name = "pillow" version = "10.3.0" @@ -3227,6 +3515,20 @@ wcwidth = "*" [package.extras] tests = ["pytest", "pytest-cov", "pytest-lazy-fixtures"] +[[package]] +name = "prompt-toolkit" +version = "3.0.43" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, +] + +[package.dependencies] +wcwidth = "*" + [[package]] name = "protobuf" version = "5.26.1" @@ -3275,6 +3577,31 @@ files = [ [package.extras] test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = 
"sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "pwlf" version = "2.2.1" @@ -3725,6 +4052,29 @@ files = [ {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, ] +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = 
"sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + [[package]] name = "pyyaml" version = "6.0.1" @@ -3737,6 +4087,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3744,8 +4095,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + 
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3762,6 +4120,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3769,6 +4128,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3788,6 +4148,106 @@ files = [ [package.dependencies] pyyaml = "*" +[[package]] +name = "pyzmq" 
+version = "26.0.3" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:44dd6fc3034f1eaa72ece33588867df9e006a7303725a12d64c3dff92330f625"}, + {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acb704195a71ac5ea5ecf2811c9ee19ecdc62b91878528302dd0be1b9451cc90"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbb9c997932473a27afa93954bb77a9f9b786b4ccf718d903f35da3232317de"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bcb34f869d431799c3ee7d516554797f7760cb2198ecaa89c3f176f72d062be"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ece17ec5f20d7d9b442e5174ae9f020365d01ba7c112205a4d59cf19dc38ee"}, + {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ba6e5e6588e49139a0979d03a7deb9c734bde647b9a8808f26acf9c547cab1bf"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3bf8b000a4e2967e6dfdd8656cd0757d18c7e5ce3d16339e550bd462f4857e59"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2136f64fbb86451dbbf70223635a468272dd20075f988a102bf8a3f194a411dc"}, + {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8918973fbd34e7814f59143c5f600ecd38b8038161239fd1a3d33d5817a38b8"}, + {file = "pyzmq-26.0.3-cp310-cp310-win32.whl", hash = "sha256:0aaf982e68a7ac284377d051c742610220fd06d330dcd4c4dbb4cdd77c22a537"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f1a9b7d00fdf60b4039f4455afd031fe85ee8305b019334b72dcf73c567edc47"}, + {file = "pyzmq-26.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:80b12f25d805a919d53efc0a5ad7c0c0326f13b4eae981a5d7b7cc343318ebb7"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_15_universal2.whl", hash = 
"sha256:a72a84570f84c374b4c287183debc776dc319d3e8ce6b6a0041ce2e400de3f32"}, + {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ca684ee649b55fd8f378127ac8462fb6c85f251c2fb027eb3c887e8ee347bcd"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e222562dc0f38571c8b1ffdae9d7adb866363134299264a1958d077800b193b7"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f17cde1db0754c35a91ac00b22b25c11da6eec5746431d6e5092f0cd31a3fea9"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7c0c0b3244bb2275abe255d4a30c050d541c6cb18b870975553f1fb6f37527"}, + {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ac97a21de3712afe6a6c071abfad40a6224fd14fa6ff0ff8d0c6e6cd4e2f807a"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b88282e55fa39dd556d7fc04160bcf39dea015f78e0cecec8ff4f06c1fc2b5"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72b67f966b57dbd18dcc7efbc1c7fc9f5f983e572db1877081f075004614fcdd"}, + {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4b6cecbbf3b7380f3b61de3a7b93cb721125dc125c854c14ddc91225ba52f83"}, + {file = "pyzmq-26.0.3-cp311-cp311-win32.whl", hash = "sha256:eed56b6a39216d31ff8cd2f1d048b5bf1700e4b32a01b14379c3b6dde9ce3aa3"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:3191d312c73e3cfd0f0afdf51df8405aafeb0bad71e7ed8f68b24b63c4f36500"}, + {file = "pyzmq-26.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:b6907da3017ef55139cf0e417c5123a84c7332520e73a6902ff1f79046cd3b94"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:068ca17214038ae986d68f4a7021f97e187ed278ab6dccb79f837d765a54d753"}, + {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7821d44fe07335bea256b9f1f41474a642ca55fa671dfd9f00af8d68a920c2d4"}, 
+ {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb438a26d87c123bb318e5f2b3d86a36060b01f22fbdffd8cf247d52f7c9a2b"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69ea9d6d9baa25a4dc9cef5e2b77b8537827b122214f210dd925132e34ae9b12"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7daa3e1369355766dea11f1d8ef829905c3b9da886ea3152788dc25ee6079e02"}, + {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6ca7a9a06b52d0e38ccf6bca1aeff7be178917893f3883f37b75589d42c4ac20"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1b7d0e124948daa4d9686d421ef5087c0516bc6179fdcf8828b8444f8e461a77"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e746524418b70f38550f2190eeee834db8850088c834d4c8406fbb9bc1ae10b2"}, + {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6b3146f9ae6af82c47a5282ac8803523d381b3b21caeae0327ed2f7ecb718798"}, + {file = "pyzmq-26.0.3-cp312-cp312-win32.whl", hash = "sha256:2b291d1230845871c00c8462c50565a9cd6026fe1228e77ca934470bb7d70ea0"}, + {file = "pyzmq-26.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:926838a535c2c1ea21c903f909a9a54e675c2126728c21381a94ddf37c3cbddf"}, + {file = "pyzmq-26.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:5bf6c237f8c681dfb91b17f8435b2735951f0d1fad10cc5dfd96db110243370b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c0991f5a96a8e620f7691e61178cd8f457b49e17b7d9cfa2067e2a0a89fc1d5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf012d8fcb9f2cf0643b65df3b355fdd74fc0035d70bb5c845e9e30a3a4654b"}, + {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:01fbfbeb8249a68d257f601deb50c70c929dc2dfe683b754659569e502fbd3aa"}, + {file = 
"pyzmq-26.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c8eb19abe87029c18f226d42b8a2c9efdd139d08f8bf6e085dd9075446db450"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5344b896e79800af86ad643408ca9aa303a017f6ebff8cee5a3163c1e9aec987"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:204e0f176fd1d067671157d049466869b3ae1fc51e354708b0dc41cf94e23a3a"}, + {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a42db008d58530efa3b881eeee4991146de0b790e095f7ae43ba5cc612decbc5"}, + {file = "pyzmq-26.0.3-cp37-cp37m-win32.whl", hash = "sha256:8d7a498671ca87e32b54cb47c82a92b40130a26c5197d392720a1bce1b3c77cf"}, + {file = "pyzmq-26.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:3b4032a96410bdc760061b14ed6a33613ffb7f702181ba999df5d16fb96ba16a"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2cc4e280098c1b192c42a849de8de2c8e0f3a84086a76ec5b07bfee29bda7d18"}, + {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bde86a2ed3ce587fa2b207424ce15b9a83a9fa14422dcc1c5356a13aed3df9d"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:34106f68e20e6ff253c9f596ea50397dbd8699828d55e8fa18bd4323d8d966e6"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ebbbd0e728af5db9b04e56389e2299a57ea8b9dd15c9759153ee2455b32be6ad"}, + {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b1d1c631e5940cac5a0b22c5379c86e8df6a4ec277c7a856b714021ab6cfad"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e891ce81edd463b3b4c3b885c5603c00141151dd9c6936d98a680c8c72fe5c67"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9b273ecfbc590a1b98f014ae41e5cf723932f3b53ba9367cfb676f838038b32c"}, + {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:b32bff85fb02a75ea0b68f21e2412255b5731f3f389ed9aecc13a6752f58ac97"}, + {file = "pyzmq-26.0.3-cp38-cp38-win32.whl", hash = "sha256:f6c21c00478a7bea93caaaef9e7629145d4153b15a8653e8bb4609d4bc70dbfc"}, + {file = "pyzmq-26.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3401613148d93ef0fd9aabdbddb212de3db7a4475367f49f590c837355343972"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2ed8357f4c6e0daa4f3baf31832df8a33334e0fe5b020a61bc8b345a3db7a606"}, + {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1c8f2a2ca45292084c75bb6d3a25545cff0ed931ed228d3a1810ae3758f975f"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b63731993cdddcc8e087c64e9cf003f909262b359110070183d7f3025d1c56b5"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3cd31f859b662ac5d7f4226ec7d8bd60384fa037fc02aee6ff0b53ba29a3ba8"}, + {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:115f8359402fa527cf47708d6f8a0f8234f0e9ca0cab7c18c9c189c194dbf620"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:715bdf952b9533ba13dfcf1f431a8f49e63cecc31d91d007bc1deb914f47d0e4"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e1258c639e00bf5e8a522fec6c3eaa3e30cf1c23a2f21a586be7e04d50c9acab"}, + {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15c59e780be8f30a60816a9adab900c12a58d79c1ac742b4a8df044ab2a6d920"}, + {file = "pyzmq-26.0.3-cp39-cp39-win32.whl", hash = "sha256:d0cdde3c78d8ab5b46595054e5def32a755fc028685add5ddc7403e9f6de9879"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:ce828058d482ef860746bf532822842e0ff484e27f540ef5c813d516dd8896d2"}, + {file = "pyzmq-26.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:788f15721c64109cf720791714dc14afd0f449d63f3a5487724f024345067381"}, + {file = 
"pyzmq-26.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c18645ef6294d99b256806e34653e86236eb266278c8ec8112622b61db255de"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e6bc96ebe49604df3ec2c6389cc3876cabe475e6bfc84ced1bf4e630662cb35"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:971e8990c5cc4ddcff26e149398fc7b0f6a042306e82500f5e8db3b10ce69f84"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8416c23161abd94cc7da80c734ad7c9f5dbebdadfdaa77dad78244457448223"}, + {file = "pyzmq-26.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:082a2988364b60bb5de809373098361cf1dbb239623e39e46cb18bc035ed9c0c"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d57dfbf9737763b3a60d26e6800e02e04284926329aee8fb01049635e957fe81"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:77a85dca4c2430ac04dc2a2185c2deb3858a34fe7f403d0a946fa56970cf60a1"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c82a6d952a1d555bf4be42b6532927d2a5686dd3c3e280e5f63225ab47ac1f5"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4496b1282c70c442809fc1b151977c3d967bfb33e4e17cedbf226d97de18f709"}, + {file = "pyzmq-26.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e4946d6bdb7ba972dfda282f9127e5756d4f299028b1566d1245fa0d438847e6"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:03c0ae165e700364b266876d712acb1ac02693acd920afa67da2ebb91a0b3c09"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3e3070e680f79887d60feeda051a58d0ac36622e1759f305a41059eff62c6da7"}, + {file = 
"pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6ca08b840fe95d1c2bd9ab92dac5685f949fc6f9ae820ec16193e5ddf603c3b2"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76654e9dbfb835b3518f9938e565c7806976c07b37c33526b574cc1a1050480"}, + {file = "pyzmq-26.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:871587bdadd1075b112e697173e946a07d722459d20716ceb3d1bd6c64bd08ce"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d0a2d1bd63a4ad79483049b26514e70fa618ce6115220da9efdff63688808b17"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0270b49b6847f0d106d64b5086e9ad5dc8a902413b5dbbb15d12b60f9c1747a4"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703c60b9910488d3d0954ca585c34f541e506a091a41930e663a098d3b794c67"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74423631b6be371edfbf7eabb02ab995c2563fee60a80a30829176842e71722a"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4adfbb5451196842a88fda3612e2c0414134874bffb1c2ce83ab4242ec9e027d"}, + {file = "pyzmq-26.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3516119f4f9b8671083a70b6afaa0a070f5683e431ab3dc26e9215620d7ca1ad"}, + {file = "pyzmq-26.0.3.tar.gz", hash = "sha256:dba7d9f2e047dfa2bca3b01f4f84aa5246725203d6284e3790f2ca15fba6b40a"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + [[package]] name = "requests" version = "2.31.0" @@ -4301,6 +4761,25 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = 
false +python-versions = "*" +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + [[package]] name = "stevedore" version = "5.2.0" @@ -4656,6 +5135,26 @@ torch = "2.2.2" [package.extras] scipy = ["scipy"] +[[package]] +name = "tornado" +version = "6.4" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." +optional = false +python-versions = ">= 3.8" +files = [ + {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, + {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, + {file = 
"tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, + {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, + {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, + {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, +] + [[package]] name = "tqdm" version = "4.66.4" @@ -4676,6 +5175,21 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + [[package]] name = "triton" version = "2.2.0" @@ -5035,4 +5549,4 @@ vision = ["albumentations", "gdown", "imagecorruptions", "kornia", "pycocotools" [metadata] lock-version = "2.0" python-versions = ">=3.9 <3.12" -content-hash = "f063dc8a179414ce47272aa890facfcfcc6caaa86168fac4b539e5e637ac9e65" +content-hash = "3db3cf9e456bd85357f3fbda947359320fb931e25752685872e20418fb4d298e" diff --git a/pyproject.toml b/pyproject.toml index 6057d05f..5d1d157a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,6 +102,7 @@ hannah-nas-eval = "hannah.nas.eval.__main__:main" [tool.poetry.group.dev.dependencies] pyre-check = "^0.9.17" pytest-xdist = "^3.1.0" +ipykernel = "^6.29.4" [build-system] diff --git 
a/test/test_graph_conversion.py b/test/test_graph_conversion.py index 6c4f2820..58946744 100644 --- a/test/test_graph_conversion.py +++ b/test/test_graph_conversion.py @@ -87,7 +87,7 @@ def test_graph_conversion_lazy_convnet(): def test_graph_conversion_functional_operators(): - from hannah.models.embedded_vision_net.models import search_space + from hannah.models.embedded_vision_net.models import embedded_vision_net from hannah.nas.functional_operators.executor import BasicExecutor import hannah.nas.functional_operators.operators @@ -97,7 +97,7 @@ def test_graph_conversion_functional_operators(): # space = test_net(input) - space = search_space("net", input, num_classes=10) + space = embedded_vision_net("net", input, num_classes=10) # space.sample() model = BasicExecutor(space) model.initialize() diff --git a/test/test_symbolic_metrics.py b/test/test_symbolic_metrics.py index 4f45ea0f..1e17aaeb 100644 --- a/test/test_symbolic_metrics.py +++ b/test/test_symbolic_metrics.py @@ -1,6 +1,6 @@ from omegaconf import OmegaConf import torch -from hannah.models.embedded_vision_net.models import search_space +from hannah.models.embedded_vision_net.models import embedded_vision_net from hannah.nas.functional_operators.executor import BasicExecutor from hannah.nas.functional_operators.op import Tensor from hannah.callbacks.summaries import FxMACSummaryCallback @@ -597,7 +597,7 @@ def __init__(self, model, device, example_feature_array) -> None: input_shape = [1, 3, 32, 32] input = Tensor(name="input", shape=input_shape, axis=("N", "C", "H", "W")) cons = OmegaConf.create([{"name": "weights"}, {"name": "macs"}]) - space = search_space(name="evn", input=input, num_classes=10, constraints=cons) + space = embedded_vision_net(name="evn", input=input, num_classes=10, constraints=cons) set_parametrization(PARAMS, space.parametrization()) model = BasicExecutor(space) diff --git a/test/test_weight_expression.py b/test/test_weight_expression.py index 91ff25b3..3f9a6425 100644 --- 
a/test/test_weight_expression.py +++ b/test/test_weight_expression.py @@ -1,7 +1,7 @@ import torch from hannah.callbacks.summaries import FxMACSummaryCallback from hannah.models.embedded_vision_net.expressions import expr_product, expr_sum -from hannah.models.embedded_vision_net.models import search_space +from hannah.models.embedded_vision_net.models import embedded_vision_net from hannah.nas.expressions.choice import Choice from hannah.nas.functional_operators.executor import BasicExecutor from hannah.nas.functional_operators.op import ChoiceOp, Tensor, Op @@ -129,7 +129,7 @@ def __init__(self, model, device, example_feature_array) -> None: self.example_feature_array = example_feature_array input = Tensor(name="input", shape=(1, 3, 32, 32), axis=("N", "C", "H", "W")) - space = search_space(name="evn", input=input, num_classes=10) + space = embedded_vision_net(name="evn", input=input, num_classes=10) # find a valid config while True: