From 03fdff1db390d18275e7f99b109e4247ec833e3a Mon Sep 17 00:00:00 2001 From: mrbean Date: Wed, 7 Jul 2021 07:15:32 +0000 Subject: [PATCH 01/10] add padding --- .gitignore | 3 + smaug/core/backend.cpp | 45 ++++----- smaug/core/backend.h | 6 +- smaug/core/network_builder.cpp | 51 ++++++----- smaug/core/types.proto | 121 +++++++++++++------------ smaug/operators/padding_op.h | 56 ++++++++++++ smaug/operators/ref/ref_padding_op.cpp | 12 +++ smaug/python/ops/nn_ops.py | 57 ++++++++++++ 8 files changed, 245 insertions(+), 106 deletions(-) create mode 100644 .gitignore create mode 100644 smaug/operators/padding_op.h create mode 100644 smaug/operators/ref/ref_padding_op.cpp diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..919494f0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +build/ +experiments/ +smaug/operators/padding_op_test.h diff --git a/smaug/core/backend.cpp b/smaug/core/backend.cpp index 3dc9adf5..9b95a917 100644 --- a/smaug/core/backend.cpp +++ b/smaug/core/backend.cpp @@ -1,38 +1,39 @@ #include "smaug/core/backend.h" #include "smaug/operators/batch_norm_op.h" +#include "smaug/operators/concat_op.h" +#include "smaug/operators/control_flow_ops.h" #include "smaug/operators/convolution_op.h" #include "smaug/operators/data_op.h" #include "smaug/operators/depthwise_convolution_op.h" #include "smaug/operators/eltwise_add_op.h" #include "smaug/operators/eltwise_mul_op.h" -#include "smaug/operators/less_op.h" -#include "smaug/operators/greater_op.h" -#include "smaug/operators/control_flow_ops.h" #include "smaug/operators/elu_op.h" +#include "smaug/operators/greater_op.h" #include "smaug/operators/inner_product_op.h" +#include "smaug/operators/less_op.h" +#include "smaug/operators/padding_op.h" #include "smaug/operators/pooling_op.h" #include "smaug/operators/relu_op.h" #include "smaug/operators/reorder_op.h" -#include "smaug/operators/concat_op.h" -#include "smaug/operators/split_op.h" -#include "smaug/operators/reshape_op.h" #include "smaug/operators/repeat_op.h" +#include "smaug/operators/reshape_op.h" #include "smaug/operators/sigmoid_op.h" -#include "smaug/operators/softmax_op.h" -#include "smaug/operators/tanh_op.h" +#include "smaug/operators/smv/smv_batch_norm_op.h" #include "smaug/operators/smv/smv_convolution_op.h" +#include "smaug/operators/smv/smv_eltwise_add_op.h" +#include "smaug/operators/smv/smv_eltwise_mul_op.h" +#include "smaug/operators/smv/smv_elu_op.h" +#include "smaug/operators/smv/smv_greater_op.h" #include "smaug/operators/smv/smv_inner_product_op.h" +#include "smaug/operators/smv/smv_less_op.h" #include "smaug/operators/smv/smv_pooling_op.h" -#include "smaug/operators/smv/smv_batch_norm_op.h" #include "smaug/operators/smv/smv_relu_op.h" -#include "smaug/operators/smv/smv_elu_op.h" -#include "smaug/operators/smv/smv_tanh_op.h" #include "smaug/operators/smv/smv_sigmoid_op.h" #include "smaug/operators/smv/smv_softmax_op.h" -#include "smaug/operators/smv/smv_eltwise_add_op.h" -#include "smaug/operators/smv/smv_eltwise_mul_op.h" -#include "smaug/operators/smv/smv_less_op.h" -#include "smaug/operators/smv/smv_greater_op.h" +#include "smaug/operators/smv/smv_tanh_op.h" +#include "smaug/operators/softmax_op.h" +#include "smaug/operators/split_op.h" +#include "smaug/operators/tanh_op.h" namespace smaug { @@ -79,6 +80,7 @@ DEF_CREATE_OP(EluOp, ReferenceBackend) DEF_CREATE_OP(SeluOp, ReferenceBackend) DEF_CREATE_OP(TanhOp, ReferenceBackend) DEF_CREATE_OP(HardTanhOp, ReferenceBackend) +DEF_CREATE_OP(PaddingOp, ReferenceBackend) 
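(Aside: DEF_CREATE_OP pairs with the DECL_CREATE_OP declarations in backend.h
further down in this patch. The macro body itself is not shown here; assuming
it follows the usual factory pattern in backend.cpp, the new PaddingOp line
would expand to roughly the sketch below, giving each backend a
createPaddingOp factory for network_builder.cpp to call:

    PaddingOp<ReferenceBackend>* ReferenceBackend::createPaddingOp(
            const std::string& name, Workspace* workspace) {
        return new PaddingOp<ReferenceBackend>(name, workspace);
    }
)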
DEF_CREATE_SMV_OP(ConvolutionOp) DEF_CREATE_SMV_OP(InnerProductOp) @@ -108,13 +110,15 @@ DEF_CREATE_OP(RepeatOp, SmvBackend) DEF_CREATE_OP(FlattenOp, SmvBackend) DEF_CREATE_OP(SwitchOp, SmvBackend) DEF_CREATE_OP(MergeOp, SmvBackend) +DEF_CREATE_OP(PaddingOp, SmvBackend) +// for simple tracing. namespace ref { -const unsigned kConvolutionHw = 0x0001; -const unsigned kInnerProductHw = 0x0002; -const unsigned kEltwiseOpHw = 0x0003; -const unsigned kBatchNormHw = 0x0004; -const unsigned kPoolingHw = 0x0005; +const unsigned kConvolutionHw = 0x0001; // 0x0001; +const unsigned kInnerProductHw = 0x0001; // 0x0002; +const unsigned kEltwiseOpHw = 0x0001; // 0x0003; +const unsigned kBatchNormHw = 0x0001; // 0x0004; +const unsigned kPoolingHw = 0x0001; // 0x0005; } // namespace ref namespace smv { @@ -140,5 +144,4 @@ float* spad1; float* spad2; } // namespace smv - } // namespace smaug diff --git a/smaug/core/backend.h b/smaug/core/backend.h index 1d1a24f0..6a61e261 100644 --- a/smaug/core/backend.h +++ b/smaug/core/backend.h @@ -58,6 +58,8 @@ template class EluOp; template class SeluOp; template class TanhOp; template class HardTanhOp; +template class PaddingOp; + #endif /** @@ -123,9 +125,9 @@ class ReferenceBackend { DECL_CREATE_OP(SeluOp); DECL_CREATE_OP(TanhOp); DECL_CREATE_OP(HardTanhOp); + DECL_CREATE_OP(PaddingOp); #undef DECL_CREATE_OP - }; /** @@ -238,10 +240,10 @@ class SmvBackend { DECL_CREATE_OP(FlattenOp); DECL_CREATE_OP(SwitchOp); DECL_CREATE_OP(MergeOp); + DECL_CREATE_OP(PaddingOp); #undef DECL_SMV_OP #undef DECL_CREATE_OP - }; } // namespace smaug diff --git a/smaug/core/network_builder.cpp b/smaug/core/network_builder.cpp index d9568cb7..94899253 100644 --- a/smaug/core/network_builder.cpp +++ b/smaug/core/network_builder.cpp @@ -1,56 +1,57 @@ -#include -#include #include +#include +#include -#include #include +#include #include "smaug/core/backend.h" -#include "smaug/core/tensor.h" +#include "smaug/core/graph.pb.h" #include "smaug/core/network.h" #include "smaug/core/network_builder.h" -#include "smaug/core/workspace.h" -#include "smaug/core/graph.pb.h" #include "smaug/core/node.pb.h" +#include "smaug/core/tensor.h" #include "smaug/core/tensor.pb.h" #include "smaug/core/types.pb.h" -#include "smaug/operators/common.h" +#include "smaug/core/workspace.h" #include "smaug/operators/batch_norm_op.h" +#include "smaug/operators/common.h" +#include "smaug/operators/concat_op.h" +#include "smaug/operators/control_flow_ops.h" #include "smaug/operators/convolution_op.h" #include "smaug/operators/data_op.h" #include "smaug/operators/depthwise_convolution_op.h" #include "smaug/operators/eltwise_add_op.h" #include "smaug/operators/eltwise_mul_op.h" -#include "smaug/operators/less_op.h" -#include "smaug/operators/greater_op.h" -#include "smaug/operators/control_flow_ops.h" #include "smaug/operators/elu_op.h" +#include "smaug/operators/greater_op.h" #include "smaug/operators/inner_product_op.h" +#include "smaug/operators/less_op.h" +#include "smaug/operators/padding_op.h" #include "smaug/operators/pooling_op.h" #include "smaug/operators/relu_op.h" #include "smaug/operators/reorder_op.h" -#include "smaug/operators/concat_op.h" -#include "smaug/operators/split_op.h" -#include "smaug/operators/reshape_op.h" #include "smaug/operators/repeat_op.h" +#include "smaug/operators/reshape_op.h" #include "smaug/operators/sigmoid_op.h" -#include "smaug/operators/softmax_op.h" -#include "smaug/operators/tanh_op.h" +#include "smaug/operators/smv/smv_batch_norm_op.h" #include 
"smaug/operators/smv/smv_convolution_op.h" +#include "smaug/operators/smv/smv_eltwise_add_op.h" +#include "smaug/operators/smv/smv_eltwise_mul_op.h" +#include "smaug/operators/smv/smv_elu_op.h" +#include "smaug/operators/smv/smv_greater_op.h" #include "smaug/operators/smv/smv_inner_product_op.h" +#include "smaug/operators/smv/smv_less_op.h" #include "smaug/operators/smv/smv_pooling_op.h" -#include "smaug/operators/smv/smv_batch_norm_op.h" #include "smaug/operators/smv/smv_relu_op.h" -#include "smaug/operators/smv/smv_elu_op.h" -#include "smaug/operators/smv/smv_tanh_op.h" #include "smaug/operators/smv/smv_sigmoid_op.h" #include "smaug/operators/smv/smv_softmax_op.h" -#include "smaug/operators/smv/smv_eltwise_add_op.h" -#include "smaug/operators/smv/smv_eltwise_mul_op.h" -#include "smaug/operators/smv/smv_less_op.h" -#include "smaug/operators/smv/smv_greater_op.h" -#include "smaug/utility/utils.h" +#include "smaug/operators/smv/smv_tanh_op.h" +#include "smaug/operators/softmax_op.h" +#include "smaug/operators/split_op.h" +#include "smaug/operators/tanh_op.h" #include "smaug/utility/debug_stream.h" +#include "smaug/utility/utils.h" using namespace smaug; using namespace std; @@ -263,6 +264,10 @@ static void createAndAddOperator(const NodeProto& node, } else if (type == OpType::Tanh) { auto op = Backend::createTanhOp(name, workspace); network->addOperator(op); + } else if (type == OpType::Padding) { // how to set this + auto op = Backend::createPaddingOp(name, workspace); + // op->setParam1(node.param1()); // not sure if we need this. + network->addOperator(op); } else if (type == OpType::HardTanh) { auto op = Backend::createHardTanhOp(name, workspace); network->addOperator(op); diff --git a/smaug/core/types.proto b/smaug/core/types.proto index 332e1b66..d545cc00 100644 --- a/smaug/core/types.proto +++ b/smaug/core/types.proto @@ -3,79 +3,80 @@ syntax = "proto3"; package smaug; enum DataType { - UnknownDataType = 0; - Int32 = 1; - Int64 = 2; - Float16 = 3; - Float32 = 4; - Float64 = 5; - Bool = 6; + UnknownDataType = 0; + Int32 = 1; + Int64 = 2; + Float16 = 3; + Float32 = 4; + Float64 = 5; + Bool = 6; } enum DataLayout { - option allow_alias = true; - UnknownLayout = 0; - NCHW = 1; - NHWC = 2; - NC = 4; - CN = 8; - NCT = 16; - NTC = 32; - N = 64; - X = 127; // Elementwise - EndDataLayout = 64; + option allow_alias = true; + UnknownLayout = 0; + NCHW = 1; + NHWC = 2; + NC = 4; + CN = 8; + NCT = 16; + NTC = 32; + N = 64; + X = 127; // Elementwise + EndDataLayout = 64; } enum DataStorageFormat { - UnknownStorageFormat = 0; - Uncompressed = 1; - CSR = 2; - PackedCSR = 3; - UncompressedHalfPrecision = 4; + UnknownStorageFormat = 0; + Uncompressed = 1; + CSR = 2; + PackedCSR = 3; + UncompressedHalfPrecision = 4; } enum OpType { - UnknownOp = 0; - Convolution3d = 1; - ConvolutionDepthwise = 2; - MaxPooling = 3; - AveragePooling = 4; - InnerProduct = 5; - BatchNorm = 6; - Data = 7; - ReLU = 8; - LReLU = 9; - ELU = 10; - SELU = 11; - Tanh = 12; - HardTanh = 13; - Sigmoid = 14; - Softmax = 15; - EltwiseAdd = 16; - Reorder = 17; - EltwiseMul = 18; - Concat = 19; - Split = 20; - Reshape = 21; - Repeat = 22; - Less = 23; - LessEqual = 24; - Greater = 25; - GreaterEqual = 26; - Switch = 27; - Merge = 28; + UnknownOp = 0; + Convolution3d = 1; + ConvolutionDepthwise = 2; + MaxPooling = 3; + AveragePooling = 4; + InnerProduct = 5; + BatchNorm = 6; + Data = 7; + ReLU = 8; + LReLU = 9; + ELU = 10; + SELU = 11; + Tanh = 12; + HardTanh = 13; + Sigmoid = 14; + Softmax = 15; + EltwiseAdd = 16; + Reorder = 17; + 
EltwiseMul = 18; + Concat = 19; + Split = 20; + Reshape = 21; + Repeat = 22; + Less = 23; + LessEqual = 24; + Greater = 25; + GreaterEqual = 26; + Switch = 27; + Merge = 28; + Padding = 29; } enum PaddingType { - UnknownPadding = 0; - SamePadding = 1; - ValidPadding = 2; + UnknownPadding = 0; + SamePadding = 1; + ValidPadding = 2; } enum HostMemoryAccessPolicy { - UnknownMemoryPolicy = 0; - AllDma = 1; - AllAcp = 2; - AllCache = 3; - AllAcpWithDmaForWeights = 4; + UnknownMemoryPolicy = 0; + AllDma = 1; + AllAcp = 2; + AllCache = 3; + AllAcpWithDmaForWeights = 4; } diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h new file mode 100644 index 00000000..12b41aee --- /dev/null +++ b/smaug/operators/padding_op.h @@ -0,0 +1,56 @@ +#ifndef _OPERATORS_PADDING_OP_H_ +#define _OPERATORS_PADDING_OP_H_ + +#include "smaug/core/backend.h" +#include "smaug/core/operator.h" +#include "smaug/core/tensor.h" +// #include "smaug/core/tensor_utils.h" +#include "smaug/core/workspace.h" + +namespace smaug { + +/** \ingroup Operators + * \brief Pad a given tensor in different dimension. + * + * This has a software-based implementation. + * + * @tparam Backend The Backend that sets Alignment. + */ +template +class PaddingOperator : public Operator { + public: + PaddingOperator(const std::string& name, + Workspace* workspace, + const std::vector _padders) + : Operator(name, OpType::Repeat, workspace), padders(_padders), + padders(_padders) { + inputs.resize(kNumInputs, nullptr); + outputs.resize(kNumOutputs, nullptr); + } + + /** Set the number of padders of the Tensor along each dimension. */ + void setPadders(const std::vector>& _padders) { + padders = _padders; + } + + auto getPadders() { return padders; } + // A required function that implements the actual Operator logic. Leave + // this blank for now. + // void run() override { ; } + + // Optional override for testing purposes. + // void createAllTensors() override { ; } + + // Optional but recommended function to verify operator parameters. + // bool validate() override { ; } + + enum { Inputs, kNumInputs }; + enum { Outputs, kNumOutputs }; + + protected: + std::vector> padders; +}; + +} // namespace smaug + +#endif diff --git a/smaug/operators/ref/ref_padding_op.cpp b/smaug/operators/ref/ref_padding_op.cpp new file mode 100644 index 00000000..a133fa1f --- /dev/null +++ b/smaug/operators/ref/ref_padding_op.cpp @@ -0,0 +1,12 @@ +#include "smaug/core/backend.h" +#include "smaug/operators/common.h" +#include "smaug/operators/padding_op.h" + +namespace smaug { + +template <> +void PaddingOp::run() { + ; +} + +} // namespace smaug \ No newline at end of file diff --git a/smaug/python/ops/nn_ops.py b/smaug/python/ops/nn_ops.py index 1b672fdf..5b32c4ed 100644 --- a/smaug/python/ops/nn_ops.py +++ b/smaug/python/ops/nn_ops.py @@ -74,6 +74,63 @@ def compute_output_dim(input_dim, weight_dim, stride, padding): output_tensors_dims=[output_tensor_dims], output_tensor_layout=output_layout, params=params)[0] +def depthwise_convolution( + input_tensor, filter_tensor, stride, padding, name="depthwise_conv"): + """Compute a 3D depthwise Convolution given 4D `input_tensor` and `filter_tensor`. + + Args: + input_tensor: A 4D `Tensor`. + filter_tensor: A 4D `Tensor`. + stride: A list of two integers: [row_stride, col_stride]. + padding: A string from: `same`, `valid`. The zero padding options. + name: Operator name (optional). 
+ """ + # 这个函数计算输出的维度,需要进行修改,因为我输出的维度不一样了 + # 好像还真是一样的,就是channel维度不一样 + def compute_output_dim(input_dim, weight_dim, stride, padding): + pad = 0 + if to_padding_type(padding) == types_pb2.SamePadding: + pad = weight_dim - 1 + return (input_dim - weight_dim + pad) // stride + 1 + + input_tensor, filter_tensor = array_ops.check_and_add_layout_transform( + name=name, op=types_pb2.ConvolutionDepthwise, + input_tensors=[input_tensor, filter_tensor]) + + row_idx = 2 if input_tensor.shape.layout == types_pb2.NCHW else 1 + col_idx = 3 if input_tensor.shape.layout == types_pb2.NCHW else 2 + chan_idx = 1 if input_tensor.shape.layout == types_pb2.NCHW else 3 + assert input_tensor.dims(chan_idx) == filter_tensor.dims(chan_idx), ( + "The weights must have the same number of channels as the inputs.") + output_rows = compute_output_dim(input_tensor.shape.dims[row_idx], + filter_tensor.shape.dims[row_idx], stride[0], + padding) + output_cols = compute_output_dim(input_tensor.shape.dims[col_idx], + filter_tensor.shape.dims[col_idx], stride[1], + padding) + output_layout = input_tensor.shape.layout + if output_layout == types_pb2.NCHW: + output_tensor_dims = [ + input_tensor.shape.dims[0], input_tensor.shape.dims[chan_idx], output_rows, + output_cols + ] + elif output_layout == types_pb2.NHWC: + output_tensor_dims = [ + input_tensor.shape.dims[0], output_rows, output_cols, + input_tensor.shape.dims[chan_idx] + ] + else: + assert False, "Unsupported output layout!" + params = node_pb2.Params() + params.conv_params.padding = to_padding_type(padding) + params.conv_params.stride.extend(stride) + + return common.add_node( + name=name, op=types_pb2.ConvolutionDepthwise, + input_tensors=[input_tensor, filter_tensor], + output_tensors_dims=[output_tensor_dims], + output_tensor_layout=output_layout, params=params)[0] + def batch_norm( input_tensor, mean_tensor, var_tensor, gamma_tensor, beta_tensor, activation=None, activation_params=None, name="batch_norm"): From 40646294a972ba93f6328a055c3f812c4b48cc71 Mon Sep 17 00:00:00 2001 From: mrbean Date: Sat, 10 Jul 2021 06:53:28 +0000 Subject: [PATCH 02/10] add padding --- make/Makefile.common | 1 + smaug/core/network_builder.cpp | 2 +- smaug/core/node.proto | 5 ++ smaug/operators/padding_op.h | 91 ++++++++++++++++++++------ smaug/operators/padding_op_test.cpp | 52 +++++++++++++++ smaug/operators/ref/ref_padding_op.cpp | 12 ---- 6 files changed, 131 insertions(+), 32 deletions(-) create mode 100644 smaug/operators/padding_op_test.cpp delete mode 100644 smaug/operators/ref/ref_padding_op.cpp diff --git a/make/Makefile.common b/make/Makefile.common index 2269f693..197cec48 100644 --- a/make/Makefile.common +++ b/make/Makefile.common @@ -82,6 +82,7 @@ TESTS = smaug/core/tensor_test.cpp \ smaug/operators/split_op_test.cpp \ smaug/operators/reshape_op_test.cpp \ smaug/operators/repeat_op_test.cpp \ + smaug/operators/padding_op_test.cpp \ smaug/operators/control_flow_ops_test.cpp \ smaug/operators/smv/smv_convolution_tiling_test.cpp \ smaug/operators/smv/smv_convolution_op_test.cpp \ diff --git a/smaug/core/network_builder.cpp b/smaug/core/network_builder.cpp index 94899253..4f4197cd 100644 --- a/smaug/core/network_builder.cpp +++ b/smaug/core/network_builder.cpp @@ -266,7 +266,7 @@ static void createAndAddOperator(const NodeProto& node, network->addOperator(op); } else if (type == OpType::Padding) { // how to set this auto op = Backend::createPaddingOp(name, workspace); - // op->setParam1(node.param1()); // not sure if we need this. 
+ op->setPadder(node.params().padding_params().padding_size()); network->addOperator(op); } else if (type == OpType::HardTanh) { auto op = Backend::createHardTanhOp(name, workspace); diff --git a/smaug/core/node.proto b/smaug/core/node.proto index 3932ec65..6bd10a06 100644 --- a/smaug/core/node.proto +++ b/smaug/core/node.proto @@ -15,6 +15,10 @@ message PoolParams { repeated int32 pool_size = 2; } +message PaddingParams { + int32 padding_size = 1; +} + message ConcatParams { int32 concat_axis = 1; } @@ -52,6 +56,7 @@ message Params { PoolParams pool_params = 2; ConcatParams concat_params = 4; SplitParams split_params = 5; + PaddingParams padding_params = 6; } ActivationParams act_params = 3; } diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h index 12b41aee..0febe56e 100644 --- a/smaug/operators/padding_op.h +++ b/smaug/operators/padding_op.h @@ -17,40 +17,93 @@ namespace smaug { * @tparam Backend The Backend that sets Alignment. */ template -class PaddingOperator : public Operator { +class PaddingOp : public Operator { public: - PaddingOperator(const std::string& name, - Workspace* workspace, - const std::vector _padders) - : Operator(name, OpType::Repeat, workspace), padders(_padders), - padders(_padders) { + PaddingOp(const std::string& name, + Workspace* workspace) + : Operator(name, OpType::Repeat, workspace){ + inputs.resize(kNumInputs, nullptr); + outputs.resize(kNumOutputs, nullptr); + } + + PaddingOp(const std::string& name, + Workspace* workspace, + int val) + : Operator(name, OpType::Repeat, workspace), padder(val){ inputs.resize(kNumInputs, nullptr); outputs.resize(kNumOutputs, nullptr); } /** Set the number of padders of the Tensor along each dimension. */ - void setPadders(const std::vector>& _padders) { - padders = _padders; + void setPadder(const int& val) { + padder = val; + // set output size? } - auto getPadders() { return padders; } - // A required function that implements the actual Operator logic. Leave - // this blank for now. - // void run() override { ; } + auto getPadder() { return padder; } + + void run() override { + Tensor* input = getInput(0); + Tensor* output = getOutput(0); + int ndims = input->ndims(); + std::vector inputDims = input->getShape().dims(); + std::vector outputDims = output->getShape().dims(); + /* + copyTensorRegion(Tensor* dest, + Tensor* src, + const std::vector& destOrigin, + const std::vector& srcOrigin, + const std::vector& regionSize + */ + std::vector destOrigin; + if (input->getShape().getLayout() == DataLayout::NCHW){ + destOrigin = std::vector({0, 0, padder, padder}); + } + else if(input->getShape().getLayout() == DataLayout::NHWC){ + destOrigin = std::vector({0, padder, padder, 0}); + } + else{ + assert(false && "Invalid padding data type!"); + } + std::vector srcOrigin = std::vector({0, 0, 0, 0}); + std::vector regionSize = inputDims; + copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize); + } // Optional override for testing purposes. 
- // void createAllTensors() override { ; } + void createAllTensors() override { + Tensor* input = getInput(0); + std::vector dims = input->getShape().dims(); + if (input->getShape().getLayout() == DataLayout::NCHW){ + dims[2] += 2*padder; + dims[3] += 2*padder; + } + else if (input->getShape().getLayout() == DataLayout::NHWC){ + dims[1] += 2*padder; + dims[2] += 2*padder; + } + TensorShape shape( + dims, input->getShape().getLayout(), Backend::Alignment); + Tensor* output = new Tensor(name, shape); + workspace->addTensor(output); + outputs.at(0) = output; + } // Optional but recommended function to verify operator parameters. - // bool validate() override { ; } + bool validate() override { + if (padder < 0){ + return false; + } + return Operator::validate(); + } - enum { Inputs, kNumInputs }; - enum { Outputs, kNumOutputs }; + enum { kInputs, kNumInputs }; + enum { kOutputs, kNumOutputs }; - protected: - std::vector> padders; + private: + int padder = 0; }; } // namespace smaug -#endif +#endif \ No newline at end of file diff --git a/smaug/operators/padding_op_test.cpp b/smaug/operators/padding_op_test.cpp new file mode 100644 index 00000000..3ba065aa --- /dev/null +++ b/smaug/operators/padding_op_test.cpp @@ -0,0 +1,52 @@ +#include "catch.hpp" +#include "smaug/core/backend.h" +#include "smaug/core/tensor.h" +#include "smaug/core/smaug_test.h" +#include "smaug/operators/padding_op.h" + +using namespace smaug; + +TEST_CASE_METHOD(SmaugTest, "padding a 4D tensor", "[refop]") { + TensorShape inputShape({ 1, 2, 3, 4 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 1, 2, 3, 4, // input 0, chan 0, row 0 + 5, 6, 7, 8, // input 0, chan 0, row 1 + 9, 10, 11, 12, // input 0, chan 0, row 2 + -1, -2, -3, -4, // input 0, chan 1, row 0 + -5, -6, -7, -8, // input 0, chan 1, row 1 + -9, -10, -11, -12 // input 0, chan 1, row 2 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); + + // Create the operator and fill it with our tensors. + auto paddingOp = new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPadder(1); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by createAllTensors. + allocateAllTensors(paddingOp); + + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 + 0, 1, 2, 3, 4, 0, // input 0, chan 0, row 0 + 0, 5, 6, 7, 8, 0, // input 0, chan 0, row 1 + 0, 9, 10, 11, 12, 0, // input 0, chan 0, row 2 + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row 3 + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 + 0, -1, -2, -3, -4, 0, // input 0, chan 1, row 0 + 0, -5, -6, -7, -8, 0, // input 0, chan 1, row 1 + 0, -9, -10, -11, -12, 0, // input 0, chan 1, row 2 + 0, 0, 0, 0, 0, 0, // input 0, chan 1, row 3 + }; + // This performs an approximate comparison between the tensor's output and + // the expected values. 
+ REQUIRE(output->getShape().dims() == + std::vector{ 1, 2, 5, 6 }); + verifyOutputs(output, expected_output); +} \ No newline at end of file diff --git a/smaug/operators/ref/ref_padding_op.cpp b/smaug/operators/ref/ref_padding_op.cpp deleted file mode 100644 index a133fa1f..00000000 --- a/smaug/operators/ref/ref_padding_op.cpp +++ /dev/null @@ -1,12 +0,0 @@ -#include "smaug/core/backend.h" -#include "smaug/operators/common.h" -#include "smaug/operators/padding_op.h" - -namespace smaug { - -template <> -void PaddingOp::run() { - ; -} - -} // namespace smaug \ No newline at end of file From 9e54e3aa8717170039c19ccf2f9afc8e8a1dab73 Mon Sep 17 00:00:00 2001 From: mrbean Date: Mon, 12 Jul 2021 02:25:17 +0000 Subject: [PATCH 03/10] padding works --- smaug/operators/padding_op.h | 6 +++ smaug/operators/padding_op_test.cpp | 66 +++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h index 0febe56e..735430ba 100644 --- a/smaug/operators/padding_op.h +++ b/smaug/operators/padding_op.h @@ -48,6 +48,12 @@ class PaddingOp : public Operator { int ndims = input->ndims(); std::vector inputDims = input->getShape().dims(); std::vector outputDims = output->getShape().dims(); + int total_dim = 1; + for (int i: outputDims){ + total_dim *= i; + } + std::vector vf(total_dim, 0); + output->fillData(vf.data(), vf.size()); /* copyTensorRegion(Tensor* dest, Tensor* src, diff --git a/smaug/operators/padding_op_test.cpp b/smaug/operators/padding_op_test.cpp index 3ba065aa..e584d64d 100644 --- a/smaug/operators/padding_op_test.cpp +++ b/smaug/operators/padding_op_test.cpp @@ -7,6 +7,71 @@ using namespace smaug; TEST_CASE_METHOD(SmaugTest, "padding a 4D tensor", "[refop]") { + SECTION("zero padding"){ + TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 0, // input 0, chan 0, row 0 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); + + // Create the operator and fill it with our tensors. + auto paddingOp = new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPadder(0); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by createAllTensors. + allocateAllTensors(paddingOp); + + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, + }; + // This performs an approximate comparison between the tensor's output and + // the expected values. + REQUIRE(output->getShape().dims() == + std::vector{ 1, 1, 1, 1 }); + verifyOutputs(output, expected_output); + } + + SECTION("1 value"){ + TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 0, // input 0, chan 0, row 0 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); + + // Create the operator and fill it with our tensors. + auto paddingOp = new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPadder(1); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by createAllTensors. + allocateAllTensors(paddingOp); + + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. 
+        std::vector expected_output{
+            0, 0, 0, // input 0, chan 0, row -1
+            0, 0, 0, // input 0, chan 0, row 0
+            0, 0, 0, // input 0, chan 1, row 3
+        };
+        // This performs an approximate comparison between the tensor's output and
+        // the expected values.
+        REQUIRE(output->getShape().dims() ==
+                std::vector{ 1, 1, 3, 3 });
+        verifyOutputs(output, expected_output);
+    }
+
+    SECTION("multiple values"){
     TensorShape inputShape({ 1, 2, 3, 4 }, DataLayout::NCHW);
     Tensor* input = new Tensor("input", inputShape);
     input->allocateStorage();
     std::vector inputValues{
         1, 2, 3, 4, // input 0, chan 0, row 0
         5, 6, 7, 8, // input 0, chan 0, row 1
         9, 10, 11, 12, // input 0, chan 0, row 2
         -1, -2, -3, -4, // input 0, chan 1, row 0
         -5, -6, -7, -8, // input 0, chan 1, row 1
         -9, -10, -11, -12 // input 0, chan 1, row 2
     };
     input->fillData(inputValues.data(), inputValues.size());
     workspace()->addTensor(input);

     // Create the operator and fill it with our tensors.
     auto paddingOp = new PaddingOp("padding", workspace());
     paddingOp->setInput(input, 0);
     paddingOp->setPadder(1);
     paddingOp->createAllTensors();
     // Allocates memory for all the output tensors created by createAllTensors.
     allocateAllTensors(paddingOp);

     paddingOp->run();
     auto output = paddingOp->getOutput(0);
     // Compare the output of the operator against expected values.
     std::vector expected_output{
         0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1
         0, 1, 2, 3, 4, 0, // input 0, chan 0, row 0
         0, 5, 6, 7, 8, 0, // input 0, chan 0, row 1
         0, 9, 10, 11, 12, 0, // input 0, chan 0, row 2
         0, 0, 0, 0, 0, 0, // input 0, chan 0, row 3
         0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1
         0, -1, -2, -3, -4, 0, // input 0, chan 1, row 0
         0, -5, -6, -7, -8, 0, // input 0, chan 1, row 1
         0, -9, -10, -11, -12, 0, // input 0, chan 1, row 2
         0, 0, 0, 0, 0, 0, // input 0, chan 1, row 3
     };
     // This performs an approximate comparison between the tensor's output and
     // the expected values.
     REQUIRE(output->getShape().dims() ==
             std::vector{ 1, 2, 5, 6 });
     verifyOutputs(output, expected_output);
+    }
 }
\ No newline at end of file

From 1090710fd8823c8e8b2f45b82226edfb32508301 Mon Sep 17 00:00:00 2001
From: mrbean
Date: Mon, 12 Jul 2021 09:01:47 +0000
Subject: [PATCH 04/10] add py op

---
 smaug/python/ops/array_ops.py | 30 ++++++++++++++++++++++++++++++
 smaug/python/ops/nn_ops.py    |  2 --
 smaug/python/ops/ops_test.py  | 14 ++++++++++++++
 3 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/smaug/python/ops/array_ops.py b/smaug/python/ops/array_ops.py
index 89cae243..51af39d4 100644
--- a/smaug/python/ops/array_ops.py
+++ b/smaug/python/ops/array_ops.py
@@ -344,3 +344,33 @@ def check_and_add_layout_transform(name, op, input_tensors):
       input_tensors[i] = reorder(input_tensors[i], expected_layoutset.layouts)
   return input_tensors
 
+def padding(input_tensor, padder, name="padding"):
+  """Construct a tensor by padding a given tensor.
+
+  Args:
+    input_tensor: Input tensor.
+    padder: An int value that represents the padding size in each spatial dimension.
+    name: Name of the operator.
+
+  Returns:
+    A paded version of the input tensor.
+  """
+  if padder < 0:
+    raise ValueError("The padder must be equal to or greater than 0")
+  src_layout = input_tensor.shape.layout
+  src_dims = input_tensor.shape.dims
+  if src_layout == types_pb2.NCHW:
+    output_tensor_dims = (src_dims[0], src_dims[1], src_dims[2]+2*padder,
+                          src_dims[3]+2*padder)
+  elif src_layout == types_pb2.NHWC:
+    output_tensor_dims = (src_dims[0], src_dims[1]+2*padder, src_dims[2]+2*padder,
+                          src_dims[3])
+  else:
+    raise ValueError("Only NHWC and NCHW layouts are supported")
+  params = node_pb2.Params()
+  params.padding_params.padding_size = padder
+  return common.add_node(
+      name=name, op=types_pb2.Padding, input_tensors=[input_tensor],
+      output_tensors_dims=[output_tensor_dims],
+      output_tensor_layout=input_tensor.shape.layout,
+      params=params)[0]
diff --git a/smaug/python/ops/nn_ops.py b/smaug/python/ops/nn_ops.py
index 5b32c4ed..d7577943 100644
--- a/smaug/python/ops/nn_ops.py
+++ b/smaug/python/ops/nn_ops.py
@@ -85,8 +85,6 @@ def depthwise_convolution(
     padding: A string from: `same`, `valid`. The zero padding options.
     name: Operator name (optional).
""" - # 这个函数计算输出的维度,需要进行修改,因为我输出的维度不一样了 - # 好像还真是一样的,就是channel维度不一样 def compute_output_dim(input_dim, weight_dim, stride, padding): pad = 0 if to_padding_type(padding) == types_pb2.SamePadding: diff --git a/smaug/python/ops/ops_test.py b/smaug/python/ops/ops_test.py index bc3545c3..98934bc2 100755 --- a/smaug/python/ops/ops_test.py +++ b/smaug/python/ops/ops_test.py @@ -102,6 +102,8 @@ def build_test_sequential_graph(self, backend): out = array_ops.repeat(out, [4, 2], "repeat") out = array_ops.stack(out, 4, 1, "stack") out0, out1, out2, out3 = array_ops.unstack(out, 1, "unstack") + out0 = array_ops.reshape(out0, [1, 1, 8, 10], types_pb2.NCHW, "reshape") + out0 = array_ops.padding(out0, 1, "padding") self.test_graph, _ = graph.to_proto() self.backend = backend @@ -432,6 +434,18 @@ def test_repeat_op(self): self.assertEqual(node.output_tensors[0].shape.layout, types_pb2.NC) self.assertEqual(node.output_tensors[0].shape.alignment, self.alignment) + def test_padding_op(self): + node = self.get_node("padding") + self.assertEqual(node.op, types_pb2.Padding) + self.assertEqual(len(node.input_tensors), 1) + self.assertEqual(len(node.output_tensors), 1) + # Output tensor + self.assertEqual(node.output_tensors[0].name, "padding/output0") + self.assertEqual(node.output_tensors[0].data_type, self.expected_dtype) + self.assertEqual(node.output_tensors[0].shape.dims, [1, 1, 10, 12]) + self.assertEqual(node.output_tensors[0].shape.layout, types_pb2.NCHW) + self.assertEqual(node.output_tensors[0].shape.alignment, self.alignment) + def test_stack_op(self): # stack op is implemented using expand_dims and repeat. Here we only test # the output. From 1b17e2a104a083aa3c08d26286662f83abb4fda9 Mon Sep 17 00:00:00 2001 From: mrbean Date: Tue, 13 Jul 2021 07:44:06 +0000 Subject: [PATCH 05/10] fix tracer --- smaug/operators/padding_op.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h index 735430ba..af164638 100644 --- a/smaug/operators/padding_op.h +++ b/smaug/operators/padding_op.h @@ -40,7 +40,7 @@ class PaddingOp : public Operator { // set output size? } - auto getPadder() { return padder; } + int getPadder() { return padder; } void run() override { Tensor* input = getInput(0); From 02ef71e48ac081492e8849845259453a388e96c1 Mon Sep 17 00:00:00 2001 From: mrbean Date: Wed, 14 Jul 2021 14:12:49 +0000 Subject: [PATCH 06/10] update padding --- .gitignore | 7 +- smaug/core/backend.cpp | 10 +- smaug/core/network_builder.cpp | 4 +- smaug/core/node.proto | 2 +- smaug/core/types.proto | 122 +++++++-------- smaug/operators/padding_op.h | 107 ++++++------- smaug/operators/padding_op_test.cpp | 235 ++++++++++++++++------------ smaug/python/ops/array_ops.py | 23 ++- smaug/python/ops/nn_ops.py | 55 ------- smaug/python/ops/ops_test.py | 2 +- 10 files changed, 265 insertions(+), 302 deletions(-) diff --git a/.gitignore b/.gitignore index 919494f0..48e912ee 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +tags build/ -experiments/ -smaug/operators/padding_op_test.h +*.pyc +__pycache__ +*.swp +*.swo \ No newline at end of file diff --git a/smaug/core/backend.cpp b/smaug/core/backend.cpp index 9b95a917..2e049f60 100644 --- a/smaug/core/backend.cpp +++ b/smaug/core/backend.cpp @@ -114,11 +114,11 @@ DEF_CREATE_OP(PaddingOp, SmvBackend) // for simple tracing. 
namespace ref { -const unsigned kConvolutionHw = 0x0001; // 0x0001; -const unsigned kInnerProductHw = 0x0001; // 0x0002; -const unsigned kEltwiseOpHw = 0x0001; // 0x0003; -const unsigned kBatchNormHw = 0x0001; // 0x0004; -const unsigned kPoolingHw = 0x0001; // 0x0005; +const unsigned kConvolutionHw = 0x0001; +const unsigned kInnerProductHw = 0x0002; +const unsigned kEltwiseOpHw = 0x0003; +const unsigned kBatchNormHw = 0x0004; +const unsigned kPoolingHw = 0x0005; } // namespace ref namespace smv { diff --git a/smaug/core/network_builder.cpp b/smaug/core/network_builder.cpp index 4f4197cd..1738b48c 100644 --- a/smaug/core/network_builder.cpp +++ b/smaug/core/network_builder.cpp @@ -264,9 +264,9 @@ static void createAndAddOperator(const NodeProto& node, } else if (type == OpType::Tanh) { auto op = Backend::createTanhOp(name, workspace); network->addOperator(op); - } else if (type == OpType::Padding) { // how to set this + } else if (type == OpType::Padding) { auto op = Backend::createPaddingOp(name, workspace); - op->setPadder(node.params().padding_params().padding_size()); + op->setPaddingSize(node.params().padding_params().padding_size()); network->addOperator(op); } else if (type == OpType::HardTanh) { auto op = Backend::createHardTanhOp(name, workspace); diff --git a/smaug/core/node.proto b/smaug/core/node.proto index 6bd10a06..5a6d73ff 100644 --- a/smaug/core/node.proto +++ b/smaug/core/node.proto @@ -16,7 +16,7 @@ message PoolParams { } message PaddingParams { - int32 padding_size = 1; + repeated int32 padding_size = 1; } message ConcatParams { diff --git a/smaug/core/types.proto b/smaug/core/types.proto index d545cc00..2411c472 100644 --- a/smaug/core/types.proto +++ b/smaug/core/types.proto @@ -3,80 +3,80 @@ syntax = "proto3"; package smaug; enum DataType { - UnknownDataType = 0; - Int32 = 1; - Int64 = 2; - Float16 = 3; - Float32 = 4; - Float64 = 5; - Bool = 6; + UnknownDataType = 0; + Int32 = 1; + Int64 = 2; + Float16 = 3; + Float32 = 4; + Float64 = 5; + Bool = 6; } enum DataLayout { - option allow_alias = true; - UnknownLayout = 0; - NCHW = 1; - NHWC = 2; - NC = 4; - CN = 8; - NCT = 16; - NTC = 32; - N = 64; - X = 127; // Elementwise - EndDataLayout = 64; + option allow_alias = true; + UnknownLayout = 0; + NCHW = 1; + NHWC = 2; + NC = 4; + CN = 8; + NCT = 16; + NTC = 32; + N = 64; + X = 127; // Elementwise + EndDataLayout = 64; } enum DataStorageFormat { - UnknownStorageFormat = 0; - Uncompressed = 1; - CSR = 2; - PackedCSR = 3; - UncompressedHalfPrecision = 4; + UnknownStorageFormat = 0; + Uncompressed = 1; + CSR = 2; + PackedCSR = 3; + UncompressedHalfPrecision = 4; } enum OpType { - UnknownOp = 0; - Convolution3d = 1; - ConvolutionDepthwise = 2; - MaxPooling = 3; - AveragePooling = 4; - InnerProduct = 5; - BatchNorm = 6; - Data = 7; - ReLU = 8; - LReLU = 9; - ELU = 10; - SELU = 11; - Tanh = 12; - HardTanh = 13; - Sigmoid = 14; - Softmax = 15; - EltwiseAdd = 16; - Reorder = 17; - EltwiseMul = 18; - Concat = 19; - Split = 20; - Reshape = 21; - Repeat = 22; - Less = 23; - LessEqual = 24; - Greater = 25; - GreaterEqual = 26; - Switch = 27; - Merge = 28; - Padding = 29; + UnknownOp = 0; + Convolution3d = 1; + ConvolutionDepthwise = 2; + MaxPooling = 3; + AveragePooling = 4; + InnerProduct = 5; + BatchNorm = 6; + Data = 7; + ReLU = 8; + LReLU = 9; + ELU = 10; + SELU = 11; + Tanh = 12; + HardTanh = 13; + Sigmoid = 14; + Softmax = 15; + EltwiseAdd = 16; + Reorder = 17; + EltwiseMul = 18; + Concat = 19; + Split = 20; + Reshape = 21; + Repeat = 22; + Less = 23; + LessEqual = 24; + Greater 
= 25; + GreaterEqual = 26; + Switch = 27; + Merge = 28; + Padding = 29; } enum PaddingType { - UnknownPadding = 0; - SamePadding = 1; - ValidPadding = 2; + UnknownPadding = 0; + SamePadding = 1; + ValidPadding = 2; } enum HostMemoryAccessPolicy { - UnknownMemoryPolicy = 0; - AllDma = 1; - AllAcp = 2; - AllCache = 3; - AllAcpWithDmaForWeights = 4; + UnknownMemoryPolicy = 0; + AllDma = 1; + AllAcp = 2; + AllCache = 3; + AllAcpWithDmaForWeights = 4; } diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h index af164638..da2297a8 100644 --- a/smaug/operators/padding_op.h +++ b/smaug/operators/padding_op.h @@ -4,13 +4,14 @@ #include "smaug/core/backend.h" #include "smaug/core/operator.h" #include "smaug/core/tensor.h" -// #include "smaug/core/tensor_utils.h" #include "smaug/core/workspace.h" +#include +using namespace google::protobuf; namespace smaug { /** \ingroup Operators - * \brief Pad a given tensor in different dimension. + * \brief Pad a given tensor in any number of dimensions with arbitrary size. * * This has a software-based implementation. * @@ -19,95 +20,77 @@ namespace smaug { template class PaddingOp : public Operator { public: - PaddingOp(const std::string& name, - Workspace* workspace) - : Operator(name, OpType::Repeat, workspace){ + PaddingOp(const std::string& name, Workspace* workspace) + : Operator(name, OpType::Padding, workspace) { inputs.resize(kNumInputs, nullptr); outputs.resize(kNumOutputs, nullptr); } - PaddingOp(const std::string& name, - Workspace* workspace, - int val) - : Operator(name, OpType::Repeat, workspace), padder(val){ - inputs.resize(kNumInputs, nullptr); - outputs.resize(kNumOutputs, nullptr); + /** + * Set the paddingSize of the Tensor along each dimension. + * The paddingSize is orgainized as + */ + void setPaddingSize(RepeatedField const& val) { + std::vector paddingSize(val.begin(), val.end()); } - /** Set the number of padders of the Tensor along each dimension. */ - void setPadder(const int& val) { - padder = val; - // set output size? 
- } + void setPaddingSize(std::vector const& val) { paddingSize = val; } - int getPadder() { return padder; } + std::vector getPaddingSize() { return paddingSize; } void run() override { - Tensor* input = getInput(0); - Tensor* output = getOutput(0); - int ndims = input->ndims(); - std::vector inputDims = input->getShape().dims(); - std::vector outputDims = output->getShape().dims(); - int total_dim = 1; - for (int i: outputDims){ - total_dim *= i; - } - std::vector vf(total_dim, 0); - output->fillData(vf.data(), vf.size()); - /* - copyTensorRegion(Tensor* dest, - Tensor* src, - const std::vector& destOrigin, - const std::vector& srcOrigin, - const std::vector& regionSize - */ - std::vector destOrigin; - if (input->getShape().getLayout() == DataLayout::NCHW){ - destOrigin = std::vector({0, 0, padder, padder}); - } - else if(input->getShape().getLayout() == DataLayout::NHWC){ - destOrigin = std::vector({0, padder, padder, 0}); - } - else{ - assert(false && "Invalid padding data type!"); - } - std::vector srcOrigin = std::vector({0, 0, 0, 0}); - std::vector regionSize = inputDims; - copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize); + Tensor* input = getInput(0); + Tensor* output = getOutput(0); + int ndims = input->ndims(); + const std::vector inputDims = input->getShape().dims(); + const std::vector outputDims = output->getShape().dims(); + int total_dim = 1; + for (int i : outputDims) { + total_dim *= i; + } + std::vector vf(total_dim, 0); + output->fillData(vf.data(), vf.size()); + std::vector destOrigin, paddingBegin, srcOrigin; + for (int i = 0; i < ndims; i++) { + paddingBegin.push_back(paddingSize[2 * i]); + srcOrigin.push_back(0); + } + destOrigin = std::vector(paddingBegin); + std::vector regionSize = inputDims; + copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize); } // Optional override for testing purposes. void createAllTensors() override { Tensor* input = getInput(0); + int ndims = input->ndims(); std::vector dims = input->getShape().dims(); - if (input->getShape().getLayout() == DataLayout::NCHW){ - dims[2] += 2*padder; - dims[3] += 2*padder; - } - else if (input->getShape().getLayout() == DataLayout::NHWC){ - dims[1] += 2*padder; - dims[2] += 2*padder; + for (int i = 0; i < ndims; i++) { + dims[i] += (paddingSize[2 * i] + paddingSize[2 * i + 1]); } TensorShape shape( dims, input->getShape().getLayout(), Backend::Alignment); Tensor* output = new Tensor(name, shape); workspace->addTensor(output); outputs.at(0) = output; - } + } // Optional but recommended function to verify operator parameters. 
bool validate() override { - if (padder < 0){ - return false; - } + Tensor* input = getInput(0); + int ndims = input->ndims(); + if (paddingSize.size() != 2 * ndims) { + return false; + } return Operator::validate(); } - + enum { kInputs, kNumInputs }; enum { kOutputs, kNumOutputs }; - private: - int padder = 0; + private: + std::vector paddingSize = {}; }; } // namespace smaug diff --git a/smaug/operators/padding_op_test.cpp b/smaug/operators/padding_op_test.cpp index e584d64d..edaae78c 100644 --- a/smaug/operators/padding_op_test.cpp +++ b/smaug/operators/padding_op_test.cpp @@ -1,118 +1,155 @@ #include "catch.hpp" #include "smaug/core/backend.h" -#include "smaug/core/tensor.h" #include "smaug/core/smaug_test.h" +#include "smaug/core/tensor.h" #include "smaug/operators/padding_op.h" using namespace smaug; -TEST_CASE_METHOD(SmaugTest, "padding a 4D tensor", "[refop]") { - SECTION("zero padding"){ - TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); - Tensor* input = new Tensor("input", inputShape); - input->allocateStorage(); - std::vector inputValues{ - 0, // input 0, chan 0, row 0 - }; - input->fillData(inputValues.data(), inputValues.size()); - workspace()->addTensor(input); +TEST_CASE_METHOD(SmaugTest, "padding a tensor", "[refop]") { + SECTION("4D zero padding") { + TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 0, // input 0, chan 0, row 0 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); + + // Create the operator and fill it with our tensors. + auto paddingOp = + new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPaddingSize({ 0, 0, 0, 0, 0, 0, 0, 0 }); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by + // createAllTensors. + allocateAllTensors(paddingOp); + + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, + }; + // This performs an approximate comparison between the tensor's output + // and the expected values. + REQUIRE(output->getShape().dims() == std::vector{ 1, 1, 1, 1 }); + verifyOutputs(output, expected_output); + } + + SECTION("2D 1 value") { + TensorShape inputShape({ 1, 1 }, DataLayout::NC); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 1, // input 0, chan 0 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); - // Create the operator and fill it with our tensors. - auto paddingOp = new PaddingOp("padding", workspace()); - paddingOp->setInput(input, 0); - paddingOp->setPadder(0); - paddingOp->createAllTensors(); - // Allocates memory for all the output tensors created by createAllTensors. - allocateAllTensors(paddingOp); + // Create the operator and fill it with our tensors. + auto paddingOp = + new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPaddingSize({ 0, 0, 1, 1 }); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by + // createAllTensors. + allocateAllTensors(paddingOp); - paddingOp->run(); - auto output = paddingOp->getOutput(0); - // Compare the output of the operator against expected values. 
- std::vector expected_output{ - 0, - }; - // This performs an approximate comparison between the tensor's output and - // the expected values. - REQUIRE(output->getShape().dims() == - std::vector{ 1, 1, 1, 1 }); - verifyOutputs(output, expected_output); + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, // input 0, chan -1 + 1, // input 0, chan 0 + 0, // input 0, chan 1 + }; + // This performs an approximate comparison between the tensor's output + // and the expected values. + REQUIRE(output->getShape().dims() == std::vector{ 1, 3 }); + verifyOutputs(output, expected_output); } - SECTION("1 value"){ - TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); - Tensor* input = new Tensor("input", inputShape); - input->allocateStorage(); - std::vector inputValues{ - 0, // input 0, chan 0, row 0 - }; - input->fillData(inputValues.data(), inputValues.size()); - workspace()->addTensor(input); + SECTION("4D 1 value") { + TensorShape inputShape({ 1, 1, 1, 1 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 0, // input 0, chan 0, row 0 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); - // Create the operator and fill it with our tensors. - auto paddingOp = new PaddingOp("padding", workspace()); - paddingOp->setInput(input, 0); - paddingOp->setPadder(1); - paddingOp->createAllTensors(); - // Allocates memory for all the output tensors created by createAllTensors. - allocateAllTensors(paddingOp); + // Create the operator and fill it with our tensors. + auto paddingOp = + new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPaddingSize({ 0, 0, 0, 0, 1, 1, 1, 1 }); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by + // createAllTensors. + allocateAllTensors(paddingOp); - paddingOp->run(); - auto output = paddingOp->getOutput(0); - // Compare the output of the operator against expected values. - std::vector expected_output{ - 0, 0, 0, // input 0, chan 0, row -1 - 0, 0, 0, // input 0, chan 0, row 0 - 0, 0, 0, // input 0, chan 1, row 3 - }; - // This performs an approximate comparison between the tensor's output and - // the expected values. - REQUIRE(output->getShape().dims() == - std::vector{ 1, 1, 3, 3 }); - verifyOutputs(output, expected_output); + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, 0, 0, // input 0, chan 0, row -1 + 0, 0, 0, // input 0, chan 0, row 0 + 0, 0, 0, // input 0, chan 1, row 3 + }; + // This performs an approximate comparison between the tensor's output + // and the expected values. 
+ REQUIRE(output->getShape().dims() == std::vector{ 1, 1, 3, 3 }); + verifyOutputs(output, expected_output); } - SECTION("multiple values"){ - TensorShape inputShape({ 1, 2, 3, 4 }, DataLayout::NCHW); - Tensor* input = new Tensor("input", inputShape); - input->allocateStorage(); - std::vector inputValues{ - 1, 2, 3, 4, // input 0, chan 0, row 0 - 5, 6, 7, 8, // input 0, chan 0, row 1 - 9, 10, 11, 12, // input 0, chan 0, row 2 - -1, -2, -3, -4, // input 0, chan 1, row 0 - -5, -6, -7, -8, // input 0, chan 1, row 1 - -9, -10, -11, -12 // input 0, chan 1, row 2 - }; - input->fillData(inputValues.data(), inputValues.size()); - workspace()->addTensor(input); + SECTION("4D multiple values") { + TensorShape inputShape({ 1, 2, 3, 4 }, DataLayout::NCHW); + Tensor* input = new Tensor("input", inputShape); + input->allocateStorage(); + std::vector inputValues{ + 1, 2, 3, 4, // input 0, chan 0, row 0 + 5, 6, 7, 8, // input 0, chan 0, row 1 + 9, 10, 11, 12, // input 0, chan 0, row 2 + -1, -2, -3, -4, // input 0, chan 1, row 0 + -5, -6, -7, -8, // input 0, chan 1, row 1 + -9, -10, -11, -12 // input 0, chan 1, row 2 + }; + input->fillData(inputValues.data(), inputValues.size()); + workspace()->addTensor(input); - // Create the operator and fill it with our tensors. - auto paddingOp = new PaddingOp("padding", workspace()); - paddingOp->setInput(input, 0); - paddingOp->setPadder(1); - paddingOp->createAllTensors(); - // Allocates memory for all the output tensors created by createAllTensors. - allocateAllTensors(paddingOp); + // Create the operator and fill it with our tensors. + auto paddingOp = + new PaddingOp("padding", workspace()); + paddingOp->setInput(input, 0); + paddingOp->setPaddingSize({ 0, 0, 0, 0, 1, 1, 1, 1 }); + paddingOp->createAllTensors(); + // Allocates memory for all the output tensors created by + // createAllTensors. + allocateAllTensors(paddingOp); - paddingOp->run(); - auto output = paddingOp->getOutput(0); - // Compare the output of the operator against expected values. - std::vector expected_output{ - 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 - 0, 1, 2, 3, 4, 0, // input 0, chan 0, row 0 - 0, 5, 6, 7, 8, 0, // input 0, chan 0, row 1 - 0, 9, 10, 11, 12, 0, // input 0, chan 0, row 2 - 0, 0, 0, 0, 0, 0, // input 0, chan 0, row 3 - 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 - 0, -1, -2, -3, -4, 0, // input 0, chan 1, row 0 - 0, -5, -6, -7, -8, 0, // input 0, chan 1, row 1 - 0, -9, -10, -11, -12, 0, // input 0, chan 1, row 2 - 0, 0, 0, 0, 0, 0, // input 0, chan 1, row 3 - }; - // This performs an approximate comparison between the tensor's output and - // the expected values. - REQUIRE(output->getShape().dims() == - std::vector{ 1, 2, 5, 6 }); - verifyOutputs(output, expected_output); + paddingOp->run(); + auto output = paddingOp->getOutput(0); + // Compare the output of the operator against expected values. + std::vector expected_output{ + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 + 0, 1, 2, 3, 4, 0, // input 0, chan 0, row 0 + 0, 5, 6, 7, 8, 0, // input 0, chan 0, row 1 + 0, 9, 10, 11, 12, 0, // input 0, chan 0, row 2 + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row 3 + 0, 0, 0, 0, 0, 0, // input 0, chan 0, row -1 + 0, -1, -2, -3, -4, 0, // input 0, chan 1, row 0 + 0, -5, -6, -7, -8, 0, // input 0, chan 1, row 1 + 0, -9, -10, -11, -12, 0, // input 0, chan 1, row 2 + 0, 0, 0, 0, 0, 0, // input 0, chan 1, row 3 + }; + // This performs an approximate comparison between the tensor's output + // and the expected values. 
+        REQUIRE(output->getShape().dims() == std::vector{ 1, 2, 5, 6 });
+        verifyOutputs(output, expected_output);
+    }
 }
\ No newline at end of file
diff --git a/smaug/python/ops/array_ops.py b/smaug/python/ops/array_ops.py
index 51af39d4..a3ee26c8 100644
--- a/smaug/python/ops/array_ops.py
+++ b/smaug/python/ops/array_ops.py
@@ -344,31 +344,26 @@ def check_and_add_layout_transform(name, op, input_tensors):
       input_tensors[i] = reorder(input_tensors[i], expected_layoutset.layouts)
   return input_tensors
 
-def padding(input_tensor, padder, name="padding"):
+def padding(input_tensor, padding_size, name="padding"):
   """Construct a tensor by padding a given tensor.
 
   Args:
     input_tensor: Input tensor.
-    padder: An int value that represents the padding size in each spatial dimension.
+    padding_size: A list that contains the number of values padded to each dimension.
     name: Name of the operator.
 
   Returns:
-    A paded version of the input tensor.
+    A padded version of the input tensor.
   """
-  if padder < 0:
-    raise ValueError("The padder must be equal to or greater than 0")
   src_layout = input_tensor.shape.layout
   src_dims = input_tensor.shape.dims
-  if src_layout == types_pb2.NCHW:
-    output_tensor_dims = (src_dims[0], src_dims[1], src_dims[2]+2*padder,
-                          src_dims[3]+2*padder)
-  elif src_layout == types_pb2.NHWC:
-    output_tensor_dims = (src_dims[0], src_dims[1]+2*padder, src_dims[2]+2*padder,
-                          src_dims[3])
-  else:
-    raise ValueError("Only NHWC and NCHW layouts are supported")
+  if len(padding_size) != 2 * len(src_dims):
+    raise ValueError("The length of padding_size must be twice the number of dimensions of the input tensor")
+  output_tensor_dims = [0] * len(src_dims)
+  for i in range(len(src_dims)):
+    output_tensor_dims[i] = src_dims[i] + padding_size[2 * i] + padding_size[2 * i+1]
   params = node_pb2.Params()
-  params.padding_params.padding_size = padder
+  params.padding_params.padding_size.extend(padding_size)
   return common.add_node(
       name=name, op=types_pb2.Padding, input_tensors=[input_tensor],
       output_tensors_dims=[output_tensor_dims],
       output_tensor_layout=input_tensor.shape.layout,
diff --git a/smaug/python/ops/nn_ops.py b/smaug/python/ops/nn_ops.py
index d7577943..1b672fdf 100644
--- a/smaug/python/ops/nn_ops.py
+++ b/smaug/python/ops/nn_ops.py
@@ -74,61 +74,6 @@ def compute_output_dim(input_dim, weight_dim, stride, padding):
       output_tensors_dims=[output_tensor_dims],
       output_tensor_layout=output_layout, params=params)[0]
 
-def depthwise_convolution(
-    input_tensor, filter_tensor, stride, padding, name="depthwise_conv"):
-  """Compute a 3D depthwise Convolution given 4D `input_tensor` and `filter_tensor`.
-
-  Args:
-    input_tensor: A 4D `Tensor`.
-    filter_tensor: A 4D `Tensor`.
-    stride: A list of two integers: [row_stride, col_stride].
-    padding: A string from: `same`, `valid`. The zero padding options.
-    name: Operator name (optional).
- """ - def compute_output_dim(input_dim, weight_dim, stride, padding): - pad = 0 - if to_padding_type(padding) == types_pb2.SamePadding: - pad = weight_dim - 1 - return (input_dim - weight_dim + pad) // stride + 1 - - input_tensor, filter_tensor = array_ops.check_and_add_layout_transform( - name=name, op=types_pb2.ConvolutionDepthwise, - input_tensors=[input_tensor, filter_tensor]) - - row_idx = 2 if input_tensor.shape.layout == types_pb2.NCHW else 1 - col_idx = 3 if input_tensor.shape.layout == types_pb2.NCHW else 2 - chan_idx = 1 if input_tensor.shape.layout == types_pb2.NCHW else 3 - assert input_tensor.dims(chan_idx) == filter_tensor.dims(chan_idx), ( - "The weights must have the same number of channels as the inputs.") - output_rows = compute_output_dim(input_tensor.shape.dims[row_idx], - filter_tensor.shape.dims[row_idx], stride[0], - padding) - output_cols = compute_output_dim(input_tensor.shape.dims[col_idx], - filter_tensor.shape.dims[col_idx], stride[1], - padding) - output_layout = input_tensor.shape.layout - if output_layout == types_pb2.NCHW: - output_tensor_dims = [ - input_tensor.shape.dims[0], input_tensor.shape.dims[chan_idx], output_rows, - output_cols - ] - elif output_layout == types_pb2.NHWC: - output_tensor_dims = [ - input_tensor.shape.dims[0], output_rows, output_cols, - input_tensor.shape.dims[chan_idx] - ] - else: - assert False, "Unsupported output layout!" - params = node_pb2.Params() - params.conv_params.padding = to_padding_type(padding) - params.conv_params.stride.extend(stride) - - return common.add_node( - name=name, op=types_pb2.ConvolutionDepthwise, - input_tensors=[input_tensor, filter_tensor], - output_tensors_dims=[output_tensor_dims], - output_tensor_layout=output_layout, params=params)[0] - def batch_norm( input_tensor, mean_tensor, var_tensor, gamma_tensor, beta_tensor, activation=None, activation_params=None, name="batch_norm"): diff --git a/smaug/python/ops/ops_test.py b/smaug/python/ops/ops_test.py index 98934bc2..9c6123a5 100755 --- a/smaug/python/ops/ops_test.py +++ b/smaug/python/ops/ops_test.py @@ -103,7 +103,7 @@ def build_test_sequential_graph(self, backend): out = array_ops.stack(out, 4, 1, "stack") out0, out1, out2, out3 = array_ops.unstack(out, 1, "unstack") out0 = array_ops.reshape(out0, [1, 1, 8, 10], types_pb2.NCHW, "reshape") - out0 = array_ops.padding(out0, 1, "padding") + out0 = array_ops.padding(out0, [0, 0, 0, 0, 1, 1, 1, 1], "padding") self.test_graph, _ = graph.to_proto() self.backend = backend From c89fa42995aae0066c6a2d538f6e50d60dad9c93 Mon Sep 17 00:00:00 2001 From: mrbean Date: Thu, 15 Jul 2021 05:05:53 +0000 Subject: [PATCH 07/10] update padding with more test and documentation --- smaug/operators/padding_op.h | 4 +-- smaug/operators/padding_op_test.cpp | 46 +++++++++++++++++++++++++++++ smaug/python/ops/array_ops.py | 3 +- 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h index da2297a8..672a829b 100644 --- a/smaug/operators/padding_op.h +++ b/smaug/operators/padding_op.h @@ -32,12 +32,12 @@ class PaddingOp : public Operator { * ,dimk_backward> */ void setPaddingSize(RepeatedField const& val) { - std::vector paddingSize(val.begin(), val.end()); + std::vector paddingSize(val.begin(), val.end()); } void setPaddingSize(std::vector const& val) { paddingSize = val; } - std::vector getPaddingSize() { return paddingSize; } + const std::vector getPaddingSize() { return paddingSize; } void run() override { Tensor* input = getInput(0); diff --git 
index edaae78c..8b5dcda9 100644
--- a/smaug/operators/padding_op_test.cpp
+++ b/smaug/operators/padding_op_test.cpp
@@ -152,4 +152,50 @@ TEST_CASE_METHOD(SmaugTest, "padding a tensor", "[refop]") {
         REQUIRE(output->getShape().dims() == std::vector<int>{ 1, 2, 5, 6 });
         verifyOutputs(output, expected_output);
     }
+
+    SECTION("4D multiple values asymmetric padding") {
+        TensorShape inputShape({ 1, 2, 3, 4 }, DataLayout::NCHW);
+        Tensor* input = new Tensor("input", inputShape);
+        input->allocateStorage<float>();
+        std::vector<float> inputValues{
+            1,  2,  3,  4,    // input 0, chan 0, row 0
+            5,  6,  7,  8,    // input 0, chan 0, row 1
+            9,  10, 11, 12,   // input 0, chan 0, row 2
+            -1, -2, -3, -4,   // input 0, chan 1, row 0
+            -5, -6, -7, -8,   // input 0, chan 1, row 1
+            -9, -10, -11, -12 // input 0, chan 1, row 2
+        };
+        input->fillData(inputValues.data(), inputValues.size());
+        workspace()->addTensor(input);
+
+        // Create the operator and fill it with our tensors.
+        auto paddingOp =
+                new PaddingOp<ReferenceBackend>("padding", workspace());
+        paddingOp->setInput(input, 0);
+        paddingOp->setPaddingSize({ 0, 0, 0, 0, 1, 1, 1, 2 });
+        paddingOp->createAllTensors();
+        // Allocates memory for all the output tensors created by
+        // createAllTensors.
+        allocateAllTensors<float>(paddingOp);
+
+        paddingOp->run();
+        auto output = paddingOp->getOutput(0);
+        // Compare the output of the operator against expected values.
+        std::vector<float> expected_output{
+            0, 0,  0,   0,   0,   0, 0, // input 0, chan 0, row -1
+            0, 1,  2,   3,   4,   0, 0, // input 0, chan 0, row 0
+            0, 5,  6,   7,   8,   0, 0, // input 0, chan 0, row 1
+            0, 9,  10,  11,  12,  0, 0, // input 0, chan 0, row 2
+            0, 0,  0,   0,   0,   0, 0, // input 0, chan 0, row 3
+            0, 0,  0,   0,   0,   0, 0, // input 0, chan 1, row -1
+            0, -1, -2,  -3,  -4,  0, 0, // input 0, chan 1, row 0
+            0, -5, -6,  -7,  -8,  0, 0, // input 0, chan 1, row 1
+            0, -9, -10, -11, -12, 0, 0, // input 0, chan 1, row 2
+            0, 0,  0,   0,   0,   0, 0, // input 0, chan 1, row 3
+        };
+        // This performs an approximate comparison between the tensor's output
+        // and the expected values.
+        REQUIRE(output->getShape().dims() == std::vector<int>{ 1, 2, 5, 7 });
+        verifyOutputs(output, expected_output);
+    }
 }
\ No newline at end of file
diff --git a/smaug/python/ops/array_ops.py b/smaug/python/ops/array_ops.py
index a3ee26c8..d89941a9 100644
--- a/smaug/python/ops/array_ops.py
+++ b/smaug/python/ops/array_ops.py
@@ -349,7 +349,8 @@ def padding(input_tensor, padding_size, name="padding"):
 
   Args:
     input_tensor: Input tensor.
-    padding_size: A list that contains number of values padded to each dimension.
+    padding_size: A list in the format of {dim0_begin, dim0_end, dim1_begin, dim1_end, ...} that
+        represent number of values padded to each dimension.
     name: Name of the operator.
 
   Returns:
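A quick sanity check of the shape arithmetic the new padding() op uses, rendered in plain C++ to mirror the Python loop in array_ops.py (padShape is an illustrative stand-in, not SMAUG code):

    #include <cassert>
    #include <vector>

    // output_dim[i] = dim[i] + padding_size[2 * i] + padding_size[2 * i + 1]
    static std::vector<int> padShape(const std::vector<int>& dims,
                                     const std::vector<int>& paddingSize) {
        assert(paddingSize.size() == 2 * dims.size());
        std::vector<int> out(dims.size());
        for (size_t i = 0; i < dims.size(); i++)
            out[i] = dims[i] + paddingSize[2 * i] + paddingSize[2 * i + 1];
        return out;
    }

    int main() {
        // The ops_test.py call above: an NCHW [1, 1, 8, 10] tensor padded by
        // one on each side of H and W only.
        std::vector<int> out = padShape({ 1, 1, 8, 10 },
                                        { 0, 0, 0, 0, 1, 1, 1, 1 });
        assert((out == std::vector<int>{ 1, 1, 10, 12 }));
        return 0;
    }

The begin/end pairs are what make asymmetric padding expressible at all; the old single padder integer could only grow H and W symmetrically.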
From fe447801cfd096aafb99e8aa3415a18eb41cc19f Mon Sep 17 00:00:00 2001
From: mrbean
Date: Thu, 15 Jul 2021 05:56:50 +0000
Subject: [PATCH 08/10] update padding

---
 smaug/operators/padding_op.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h
index 672a829b..f51d4815 100644
--- a/smaug/operators/padding_op.h
+++ b/smaug/operators/padding_op.h
@@ -32,12 +32,12 @@ class PaddingOp : public Operator {
      * ,dimk_backward> */
     void setPaddingSize(RepeatedField<int> const& val) {
-        std::vector<int> paddingSize(val.begin(), val.end());
+        paddingSize.assign(val.begin(), val.end());
     }
     void setPaddingSize(std::vector<int> const& val) { paddingSize = val; }
-    const std::vector<int> getPaddingSize() { return paddingSize; }
+    std::vector<int> getPaddingSize() const { return paddingSize; }
 
     void run() override {
         Tensor* input = getInput(0);
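The functional change in PATCH 08 is easy to miss: the old setter declared a brand-new local vector that shadowed the paddingSize member, so the values passed in were silently dropped. A minimal self-contained sketch of the pitfall and the assign() fix (illustrative C++ only, not SMAUG code):

    #include <cassert>
    #include <vector>

    struct PaddingParams {
        std::vector<int> paddingSize;

        void setBuggy(const std::vector<int>& val) {
            // Shadows the member: constructs a local vector, then discards it.
            std::vector<int> paddingSize(val.begin(), val.end());
        }

        void setFixed(const std::vector<int>& val) {
            // Writes through to the member, mirroring paddingSize.assign().
            paddingSize.assign(val.begin(), val.end());
        }
    };

    int main() {
        PaddingParams p;
        p.setBuggy({ 0, 0, 1, 1 });
        assert(p.paddingSize.empty());      // bug: nothing was stored
        p.setFixed({ 0, 0, 1, 1 });
        assert(p.paddingSize.size() == 4);  // fix: values retained
        return 0;
    }

The same patch also makes the getter a const member function; PATCH 09 tightens it further to return a const reference and avoid a copy per call.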
From d818a5ffe89052d4505d3dc3e2a5a5f083c7f094 Mon Sep 17 00:00:00 2001
From: mrbean
Date: Fri, 16 Jul 2021 02:10:18 +0000
Subject: [PATCH 09/10] update padding

---
 smaug/operators/padding_op.h        | 32 ++++++++++++++---------------
 smaug/operators/padding_op_test.cpp |  2 +-
 smaug/python/ops/array_ops.py       |  8 +++++---
 3 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h
index f51d4815..9188b4b0 100644
--- a/smaug/operators/padding_op.h
+++ b/smaug/operators/padding_op.h
@@ -28,42 +28,42 @@ class PaddingOp : public Operator {
 
     /**
      * Set the paddingSize of the Tensor along each dimension.
-     * The paddingSize is orgainized as <dim0_forward, dim0_backward, ...
-     * ,dimk_backward> */
-    void setPaddingSize(RepeatedField<int> const& val) {
+     * The paddingSize is organized as {dim0_begin, dim0_end, dim1_begin,
+     * dim1_end, ...}
+     */
+    void setPaddingSize(const RepeatedField<int>& val) {
         paddingSize.assign(val.begin(), val.end());
     }
     void setPaddingSize(std::vector<int> const& val) { paddingSize = val; }
-    std::vector<int> getPaddingSize() const { return paddingSize; }
+    const std::vector<int>& getPaddingSize() const { return paddingSize; }
 
     void run() override {
-        Tensor* input = getInput(0);
-        Tensor* output = getOutput(0);
+        Tensor* input = getInput(kInput);
+        Tensor* output = getOutput(kOutput);
         int ndims = input->ndims();
-        const std::vector<int> inputDims = input->getShape().dims();
-        const std::vector<int> outputDims = output->getShape().dims();
+        const std::vector<int>& inputDims = input->getShape().dims();
+        const std::vector<int>& outputDims = output->getShape().dims();
         int total_dim = 1;
         for (int i : outputDims) {
            total_dim *= i;
        }
        std::vector<float> vf(total_dim, 0);
        output->fillData(vf.data(), vf.size());
-        std::vector<int> destOrigin, paddingBegin, srcOrigin;
+        std::vector<int> paddingBegin, srcOrigin;
        for (int i = 0; i < ndims; i++) {
-            paddingBegin.push_back(paddingSize[2 * i]);
+            paddingBegin.push_back(paddingSize.at(2 * i));
            srcOrigin.push_back(0);
        }
-        destOrigin = std::vector<int>(paddingBegin);
+        std::vector<int> destOrigin = std::vector<int>(paddingBegin);
        std::vector<int> regionSize = inputDims;
        copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize);
    }
 
    // Optional override for testing purposes.
    void createAllTensors() override {
-        Tensor* input = getInput(0);
+        Tensor* input = getInput(kInput);
        int ndims = input->ndims();
        std::vector<int> dims = input->getShape().dims();
        for (int i = 0; i < ndims; i++) {
@@ -73,12 +73,12 @@ class PaddingOp : public Operator {
                dims, input->getShape().getLayout(), Backend::Alignment);
        Tensor* output = new Tensor(name, shape);
        workspace->addTensor(output);
-        outputs.at(0) = output;
+        outputs.at(kOutput) = output;
    }
 
    // Optional but recommended function to verify operator parameters.
    bool validate() override {
-        Tensor* input = getInput(0);
+        Tensor* input = getInput(kInput);
        int ndims = input->ndims();
        if (paddingSize.size() != 2 * ndims) {
            return false;
@@ -86,8 +86,8 @@ class PaddingOp : public Operator {
        return Operator::validate();
    }
 
-    enum { kInputs, kNumInputs };
-    enum { kOutputs, kNumOutputs };
+    enum { kInput, kNumInputs };
+    enum { kOutput, kNumOutputs };
 
  private:
    std::vector<int> paddingSize = {};
diff --git a/smaug/operators/padding_op_test.cpp b/smaug/operators/padding_op_test.cpp
index 8b5dcda9..a78b31a3 100644
--- a/smaug/operators/padding_op_test.cpp
+++ b/smaug/operators/padding_op_test.cpp
@@ -99,7 +99,7 @@ TEST_CASE_METHOD(SmaugTest, "padding a tensor", "[refop]") {
         std::vector<float> expected_output{
             0, 0, 0, // input 0, chan 0, row -1
             0, 0, 0, // input 0, chan 0, row 0
-            0, 0, 0, // input 0, chan 1, row 3
+            0, 0, 0, // input 0, chan 1, row 1
         };
         // This performs an approximate comparison between the tensor's output
         // and the expected values.
diff --git a/smaug/python/ops/array_ops.py b/smaug/python/ops/array_ops.py
index d89941a9..bef35cb9 100644
--- a/smaug/python/ops/array_ops.py
+++ b/smaug/python/ops/array_ops.py
@@ -349,8 +349,10 @@ def padding(input_tensor, padding_size, name="padding"):
 
   Args:
     input_tensor: Input tensor.
-    padding_size: A list in the format of {dim0_begin, dim0_end, dim1_begin, dim1_end, ...} that
-        represent number of values padded to each dimension.
+    padding_size: A list in the format of {dim0_begin, dim0_end, dim1_begin,
+        dim1_end, ...} that represents the number of values padded to
+        each dimension. Note that the order of dimensions must align
+        with the data layout of input_tensor.
     name: Name of the operator.
 
   Returns:
@@ -359,7 +361,7 @@ def padding(input_tensor, padding_size, name="padding"):
   src_layout = input_tensor.shape.layout
   src_dims = input_tensor.shape.dims
   if len(padding_size) != 2 * len(src_dims):
-    raise ValueError("The padding_size's dimension must be two times as the input_tensor")
+    raise ValueError("len(padding_size) must be 2 * len(input_tensor.shape.dims)")
   output_tensor_dims = [0] * len(src_dims)
   for i in range(len(src_dims)):
     output_tensor_dims[i] = src_dims[i] + padding_size[2 * i] + padding_size[2 * i + 1]

From 7689d041b877cea4821e80de9f8406de24cfd7f0 Mon Sep 17 00:00:00 2001
From: mrbean
Date: Fri, 16 Jul 2021 06:27:26 +0000
Subject: [PATCH 10/10] update padding

---
 smaug/operators/padding_op.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/smaug/operators/padding_op.h b/smaug/operators/padding_op.h
index 9188b4b0..66540913 100644
--- a/smaug/operators/padding_op.h
+++ b/smaug/operators/padding_op.h
@@ -56,9 +56,7 @@ class PaddingOp : public Operator {
             paddingBegin.push_back(paddingSize.at(2 * i));
             srcOrigin.push_back(0);
         }
-        std::vector<int> destOrigin = std::vector<int>(paddingBegin);
-        std::vector<int> regionSize = inputDims;
-        copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize);
+        copyTensorRegion(output, input, paddingBegin, srcOrigin, inputDims);
     }
 
     // Optional override for testing purposes.
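To make the final run() implementation concrete: it zero-fills the output tensor, then copies the whole input as a single region whose destination origin along each dimension is that dimension's begin padding. A standalone 2D sketch of the same scheme (copyRegion2D is a hand-rolled stand-in for SMAUG's copyTensorRegion, not its real signature):

    #include <cstdio>
    #include <vector>

    // Copy a srcH x srcW block into row-major dst at (destRow, destCol).
    static void copyRegion2D(std::vector<float>& dst, int dstW,
                             const std::vector<float>& src, int srcH, int srcW,
                             int destRow, int destCol) {
        for (int r = 0; r < srcH; r++)
            for (int c = 0; c < srcW; c++)
                dst[(destRow + r) * dstW + destCol + c] = src[r * srcW + c];
    }

    int main() {
        // A 2x3 input with paddingSize = { row_begin, row_end, col_begin,
        // col_end } = { 1, 1, 1, 2 }, echoing the asymmetric test above.
        const int srcH = 2, srcW = 3;
        const std::vector<float> src = { 1, 2, 3, 4, 5, 6 };
        const int pad[] = { 1, 1, 1, 2 };
        const int dstH = srcH + pad[0] + pad[1];  // 4
        const int dstW = srcW + pad[2] + pad[3];  // 6
        std::vector<float> dst(dstH * dstW, 0);   // zero-fill, like run()
        // destOrigin = { pad[0], pad[2] }, srcOrigin = { 0, 0 }.
        copyRegion2D(dst, dstW, src, srcH, srcW, pad[0], pad[2]);
        for (int r = 0; r < dstH; r++) {
            for (int c = 0; c < dstW; c++)
                printf("%4g", dst[r * dstW + c]);
            printf("\n");
        }
        return 0;
    }

The input lands at rows 1-2, columns 1-3 of the 4x6 output, leaving one zero row above and below, one zero column on the left, and two on the right: exactly the begin/end semantics the padding_size list encodes.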