qlinearadd operator #2188

Merged · 7 commits · Oct 6, 2023
76 changes: 76 additions & 0 deletions src/onnx/broadcast_qdq.cpp
@@ -0,0 +1,76 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#include <migraphx/onnx/broadcast_qdq.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

// Prepares a quantizelinear or dequantizelinear operation by broadcasting the
// scale and zero-point inputs of a QLinear* operator to the shape of x_in.
// Returns: an op_name instruction with inputs x_in, the broadcast scale
// (float), and the broadcast zero_pt (8-bit).
instruction_ref bcast_qdq_instr(const std::string& op_name,
instruction_ref x_in,
instruction_ref arg_fscale,
instruction_ref arg_z_pt,
const onnx_parser::node_info& info)
{
auto in_lens = x_in->get_shape().lens();

// Prep 1: broadcast the scale. It can come in as a scalar or a 1-D tensor.
instruction_ref bcast_scale;
if(arg_fscale->get_shape().elements() > 1)
bcast_scale = info.add_instruction(
migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_fscale);
else
bcast_scale = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_fscale);

// Prep 2: broadcast the zero point. It can come in as a scalar or a 1-D tensor.
instruction_ref bcast_zero_pt;
if(arg_z_pt->get_shape().elements() > 1)
bcast_zero_pt = info.add_instruction(
migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_z_pt);
else
bcast_zero_pt = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_z_pt);

// op_name is either quantizelinear or dequantizelinear:
return info.add_instruction(migraphx::make_op(op_name), x_in, bcast_scale, bcast_zero_pt);
}

// Multibroadcast a scalar to the given output shape.
instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
instruction_ref arg_in,
const onnx_parser::node_info& info)
{
auto bcast_instr_out = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", shape_out.lens()}}), arg_in);
return bcast_instr_out;
}

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
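
The choice between the two broadcast ops above is worth spelling out: broadcast pins a 1-D tensor (e.g. a per-axis scale) to a chosen axis of the output shape, while multibroadcast stretches a scalar across every element. Below is a minimal standalone sketch of both modes, using only the calls that appear in this diff; the program and parameter setup is illustrative only and not part of this PR.

#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/make_op.hpp>
#include <vector>

int main()
{
    // Illustrative sketch; not part of this PR's diff.
    migraphx::program p;
    auto* mm = p.get_main_module();

    std::vector<std::size_t> out_lens = {2, 64};

    // a 1-D per-axis scale and a scalar scale
    auto scale_1d = mm->add_parameter(
        "scale_1d", migraphx::shape{migraphx::shape::float_type, {2}});
    auto scale_s = mm->add_parameter(
        "scale_s", migraphx::shape{migraphx::shape::float_type, {1}});

    // 1-D tensor: pin it to axis 0 of {2, 64}; its length must match that axis
    mm->add_instruction(
        migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", out_lens}}), scale_1d);

    // scalar: replicate the single value across the whole output shape
    mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", out_lens}}), scale_s);
    return 0;
}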
56 changes: 56 additions & 0 deletions src/onnx/include/migraphx/onnx/broadcast_qdq.hpp
@@ -0,0 +1,56 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP

#include <string>

#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

// Prepares a quantizelinear or dequantizelinear operation by broadcasting the
// scale and zero-point inputs of a QLinear* operator to the shape of x_in.
// Returns: an op_name instruction with inputs x_in, the broadcast scale
// (float), and the broadcast zero_pt (8-bit).
instruction_ref bcast_qdq_instr(const std::string& op_name,
instruction_ref x_in,
instruction_ref arg_fscale,
instruction_ref arg_z_pt,
const onnx_parser::node_info& info);

// Multibroadcast a scalar to the given output shape.
instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
instruction_ref arg_in,
const onnx_parser::node_info& info);

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
154 changes: 154 additions & 0 deletions src/onnx/parse_qlinearadd.cpp
@@ -0,0 +1,154 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/common.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

/*
*********************************************************************************
* Reference: see QLinearAdd in *
* https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md *
*********************************************************************************

com.microsoft.QLinearAdd
Performs element-wise binary addition on 8 bit data types (with Numpy-style broadcasting support).

C = (A_scale * (A - A_zero_point) + B_scale * (B - B_zero_point))/C_scale + C_zero_point

Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator
set.

Inputs (7 - 8)
A : T
First operand.

A_scale : tensor(float)
Input A's scale. It's a scalar, which means a per-tensor/layer quantization.

A_zero_point (optional) : T
Input A zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.

B : T
Second operand.

B_scale : tensor(float)
Input B's scale. It's a scalar, which means a per-tensor/layer quantization.

B_zero_point (optional) : T
Input B zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.

C_scale : tensor(float)
Output scale. It's a scalar, which means a per-tensor/layer quantization.

C_zero_point (optional) : T

Output zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.

Outputs
C : T
Result, has same element type as two inputs

Type Constraints
T : tensor(uint8), tensor(int8)
Constrain input and output types to 8 bit signed and unsigned tensors.

*/
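
// A worked example of the formula above, with illustrative values (not from
// the spec): A = 20, A_scale = 0.05, A_zero_point = 0 dequantizes to
// 0.05 * (20 - 0) = 1.0, and B = 148, B_scale = 0.05, B_zero_point = 128
// dequantizes to 0.05 * (148 - 128) = 1.0. With C_scale = 0.05 and
// C_zero_point = 64: C = (1.0 + 1.0) / 0.05 + 64 = 104.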

struct parse_qlinearadd : op_parser<parse_qlinearadd>
{
std::vector<op_desc> operators() const { return {{"QLinearAdd"}}; }

// Basic type checking for the QLinearAdd operator.
void check_inputs(const std::vector<instruction_ref>& args) const
{
if(args.size() < 7)
MIGRAPHX_THROW("QLINEARADD: missing inputs");

const auto& in_a = args[0];
const auto& in_b = args[3];

auto sh_a = in_a->get_shape();
auto sh_b = in_b->get_shape();

auto type_a = sh_a.type();
auto type_b = sh_b.type();
if(type_a != migraphx::shape::int8_type and type_a != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARADD: unsupported input type");
if(type_b != migraphx::shape::int8_type and type_b != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARADD: unsupported input type");
if(type_a != type_b)
MIGRAPHX_THROW("QLINEARADD: mismatched input types");
}

instruction_ref parse(const op_desc& /* opd */,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args);

// A
const auto& in_a = args[0];
const auto& in_scale_a = args[1];
const auto& in_zero_pt_a = args[2];

auto dquant_a = bcast_qdq_instr("dequantizelinear", in_a, in_scale_a, in_zero_pt_a, info);
Collaborator:
Suggested change:
-    auto dquant_a = bcast_qdq_instr("dequantizelinear", in_a, in_scale_a, in_zero_pt_a, info);
+    auto dquant_a = info.add_common_op("dequantizelinear", {in_a, in_scale_a, in_zero_pt_a}, false);

This requires some modifications of add_common_op under onnx_parser.cpp/hpp and common.cpp/hpp, but it would encourage reuse of existing functions that aim to do the same thing. I've tested this by making the convert optional in add_common_op, adding a flag at the end of the function (on by default to keep other function calls intact). It passes both tests with this approach.

@pfultz2 @umangyadav feel free to chime in if this approach is cleaner or not.

Contributor (author):

Thanks. Please note that this new API uses both Broadcast and Multibroadcast; the other one doesn't use Broadcast, and I am not sure we want to turn it into a kitchen sink.

There are other quantized operators I am adding that will use Broadcast, so that part isn't exercised by your patch.

pfultz2 (Collaborator), Oct 5, 2023:

I don't think add_common_op will work, because we compare from the right. In this case we would need to compare from the left, and that only works here because it broadcasts on axis 0; for axis 1 it wouldn't work. We could extend add_common_op so an optional axis can be passed to handle 1-D tensors.

Either way, we may want to address this in another PR.
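
(A concrete illustration of the alignment point, with assumed shapes: given an input of lens {2, 64} and a 1-D scale of lens {2}, right-aligned matching compares 2 against 64 and fails, while broadcast with axis = 0 compares 2 against 2 and succeeds.)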

Collaborator:

I think we should address this in another PR. Effectively, we want reusable functions that automatically broadcast shapes to be compatible for any operation.

Collaborator:

I agree that the common_op updates should be in another PR. We need to discuss how we want to design such an API for these cases.


// B
const auto& in_b = args[3];
const auto& in_scale_b = args[4];
const auto& in_zero_pt_b = args[5];
auto dquant_b = bcast_qdq_instr("dequantizelinear", in_b, in_scale_b, in_zero_pt_b, info);

// C = A + B
auto out_c = info.add_common_op("add", dquant_a, dquant_b);

const auto& in_scale_c = args[6];

// The zero point for C is supplied as the last, optional argument.
if(args.size() == 8)
return (bcast_qdq_instr("quantizelinear", out_c, in_scale_c, args[7], info));

// If no zero point is given, just broadcast the scale.
auto bcast_scale_c = bcast_scalar_instr(out_c->get_shape(), in_scale_c, info);
return (info.add_instruction(migraphx::make_op("quantizelinear"), out_c, bcast_scale_c));
}
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
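
The net effect of the parser above, sketched as the emitted instruction chain (paraphrased, not verbatim MIGraphX IR; bcast stands for the broadcast/multibroadcast prep in bcast_qdq_instr):

// dq_a = dequantizelinear(A, bcast(A_scale), bcast(A_zero_point))   // int8/uint8 -> float
// dq_b = dequantizelinear(B, bcast(B_scale), bcast(B_zero_point))
// sum  = add(dq_a, dq_b)                                            // computed in float
// C    = quantizelinear(sum, bcast(C_scale), bcast(C_zero_point))   // float -> int8/uint8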
55 changes: 55 additions & 0 deletions test/onnx/gen_onnx.py
@@ -5096,6 +5096,61 @@ def prelu_brcst_test():
return ([node], [arg0, arg1], [arg_out])


@onnx_test()
def qlinearadd_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])

b = helper.make_tensor_value_info('B', TensorProto.UINT8, [64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])

sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])

c = helper.make_tensor_value_info('C', TensorProto.UINT8, [64])

node = onnx.helper.make_node(
'QLinearAdd',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])


@onnx_test()
def qlinearadd_bcast_test():
a = helper.make_tensor_value_info('A', TensorProto.INT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.INT8, [], [0])

b = helper.make_tensor_value_info('B', TensorProto.INT8, [1, 1, 64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.INT8, [], [32])

sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.INT8, [], [-64])

c = helper.make_tensor_value_info('C', TensorProto.INT8, [1, 1, 64])

node = onnx.helper.make_node(
'QLinearAdd',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])


@onnx_test()
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])