Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a padding operator. #95

Merged
merged 11 commits into from
Jul 17, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 16 additions & 16 deletions smaug/operators/padding_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,42 +28,42 @@ class PaddingOp : public Operator {

/**
* Set the paddingSize of the Tensor along each dimension.
* The paddingSize is organized as <dim1_forward, dim1_backward, ...
* ,dimk_backward>
* The paddingSize is organized as <dim0_begin, dim0_end, dim1_begin,
* dim1_end, ...>
*/
void setPaddingSize(RepeatedField<google::protobuf::int32> const& val) {
void setPaddingSize(const RepeatedField<google::protobuf::int32>& val) {
paddingSize.assign(val.begin(), val.end());
}

void setPaddingSize(std::vector<int> const& val) { paddingSize = val; }

std::vector<int> getPaddingSize() const { return paddingSize; }
const std::vector<int>& getPaddingSize() const { return paddingSize; }

void run() override {
Tensor* input = getInput(0);
Tensor* output = getOutput(0);
Tensor* input = getInput(kInput);
Tensor* output = getOutput(kOutput);
int ndims = input->ndims();
const std::vector<int> inputDims = input->getShape().dims();
const std::vector<int> outputDims = output->getShape().dims();
const std::vector<int>& inputDims = input->getShape().dims();
const std::vector<int>& outputDims = output->getShape().dims();
int total_dim = 1;
for (int i : outputDims) {
total_dim *= i;
}
std::vector<float> vf(total_dim, 0);
output->fillData(vf.data(), vf.size());
std::vector<int> destOrigin, paddingBegin, srcOrigin;
std::vector<int> paddingBegin, srcOrigin;
for (int i = 0; i < ndims; i++) {
paddingBegin.push_back(paddingSize[2 * i]);
paddingBegin.push_back(paddingSize.at(2 * i));
srcOrigin.push_back(0);
}
destOrigin = std::vector<int>(paddingBegin);
std::vector<int> destOrigin = std::vector<int>(paddingBegin);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, I should have caught this earlier. Why build paddingBegin and then copy it into destOrigin? Why not just use paddingBegin directly?

std::vector<int> regionSize = inputDims;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here - no need to make a copy of inputDims, just use it directly. I think you're trying to make it more clear what each vector is representing in the copyTensorRegion call but there's no need since the API documents it very clearly already.

copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize);
}

// Optional override for testing purposes.
void createAllTensors() override {
Tensor* input = getInput(0);
Tensor* input = getInput(kInput);
int ndims = input->ndims();
std::vector<int> dims = input->getShape().dims();
for (int i = 0; i < ndims; i++) {
Expand All @@ -73,21 +73,21 @@ class PaddingOp : public Operator {
dims, input->getShape().getLayout(), Backend::Alignment);
Tensor* output = new Tensor(name, shape);
workspace->addTensor(output);
outputs.at(0) = output;
outputs.at(kOutput) = output;
}

// Optional but recommended function to verify operator parameters.
bool validate() override {
Tensor* input = getInput(0);
Tensor* input = getInput(kInput);
int ndims = input->ndims();
if (paddingSize.size() != 2 * ndims) {
return false;
}
return Operator::validate();
}

enum { kInputs, kNumInputs };
enum { kOutputs, kNumOutputs };
enum { kInput, kNumInputs };
enum { kOutput, kNumOutputs };

private:
std::vector<int> paddingSize = {};
Expand Down
2 changes: 1 addition & 1 deletion smaug/operators/padding_op_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ TEST_CASE_METHOD(SmaugTest, "padding a tensor", "[refop]") {
std::vector<float> expected_output{
0, 0, 0, // input 0, chan 0, row -1
0, 0, 0, // input 0, chan 0, row 0
0, 0, 0, // input 0, chan 1, row 3
0, 0, 0, // input 0, chan 1, row 1
};
// This performs an approximate comparison between the tensor's output
// and the expected values.
Expand Down
8 changes: 5 additions & 3 deletions smaug/python/ops/array_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -349,8 +349,10 @@ def padding(input_tensor, padding_size, name="padding"):

Args:
input_tensor: Input tensor.
padding_size: A list in the format of {dim0_begin, dim0_end, dim1_begin, dim1_end, ...} that
represent number of values padded to each dimension.
padding_size: A list in the format of {dim0_begin, dim0_end, dim1_begin,
dim1_end, ...} that represents the number of values padded to
each dimension. Note that the order of dimensions in this list
must align with the data layout of input_tensor.
name: Name of the operator.

Returns:
Expand All @@ -359,7 +361,7 @@ def padding(input_tensor, padding_size, name="padding"):
src_layout = input_tensor.shape.layout
src_dims = input_tensor.shape.dims
if len(padding_size) != 2 * len(src_dims):
raise ValueError("The padding_size's dimension must be two times as the input_tensor")
raise ValueError("len(padding_size) should be 2x input_tensor.shape.dims")
output_tensor_dims = [0] * len(src_dims)
for i in range(len(src_dims)):
output_tensor_dims[i] = src_dims[i] + padding_size[2 * i] + padding_size[2 * i+1]
Expand Down