Merge branch 'develop' into windows-instruction-ref
causten authored Oct 10, 2023
2 parents 5e3aaaa + e5cd8b6 commit e07b7ca
Showing 18 changed files with 507 additions and 39 deletions.
2 changes: 2 additions & 0 deletions src/include/migraphx/normalize_attributes.hpp
@@ -52,6 +52,7 @@ using dependent_type = typename select_dependent_type<T, Ts...>::type;
* \param attr_val the normalize_axes attributes from the operator
* \param prefix error message prefix
*/
MIGRAPHX_EXPORT
std::vector<int64_t> normalize_axes(const std::vector<int64_t>& axes,
const shape& input_shape,
const value& attr_val,
@@ -67,6 +68,7 @@ std::vector<int64_t> normalize_axes(const std::vector<int64_t>& axes,
* \param attr_val the normalize_axes attributes from the operator
* \param prefix error message prefix
*/
MIGRAPHX_EXPORT
std::vector<int64_t> normalize_indices(const std::vector<int64_t>& indices,
const std::vector<int64_t>& axes,
const shape& input_shape,
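These MIGRAPHX_EXPORT annotations are part of the Windows work this merge feeds into: on Windows, DLL symbols must be exported explicitly rather than being visible by default. A minimal sketch of the usual export-macro pattern, for context only — the actual definition is generated for the build and may differ, and MIGRAPHX_BUILD_DLL below is a hypothetical flag used for illustration:

#if defined(_WIN32)
  // On Windows, symbols are hidden by default; mark them for export when
  // building the DLL and for import when consuming it.
  #ifdef MIGRAPHX_BUILD_DLL // hypothetical build-side flag, for illustration
    #define MIGRAPHX_EXPORT __declspec(dllexport)
  #else
    #define MIGRAPHX_EXPORT __declspec(dllimport)
  #endif
#else
  // On ELF platforms, make the symbol visible even when building with
  // -fvisibility=hidden.
  #define MIGRAPHX_EXPORT __attribute__((visibility("default")))
#endif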
1 change: 1 addition & 0 deletions src/include/migraphx/pad_calc.hpp
@@ -64,6 +64,7 @@ shape compute_padded_shape(const shape& input,

// Used for dynamic auto padding of pooling operators where padding needs to be computed at
// evaluation time.
MIGRAPHX_EXPORT
shape compute_padded_pool_shape(const shape& input,
const shape& kernel,
const std::vector<std::size_t>& padding,
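The comment above refers to computing pooling padding at evaluation time, once the concrete dims of a dynamic input are known. For reference, a standalone sketch of the usual SAME_UPPER-style auto-pad arithmetic for one spatial dimension — illustrative only, not necessarily MIGraphX's exact formula:

#include <cstddef>
#include <iostream>
#include <vector>

// SAME_UPPER-style auto padding for one spatial dimension: the output
// length equals ceil(input / stride), and any needed padding is split
// with the extra element going to the end.
std::vector<std::size_t> auto_pad_1d(std::size_t in, std::size_t kernel, std::size_t stride)
{
    std::size_t out   = (in + stride - 1) / stride; // ceil(in / stride)
    std::size_t total = (out - 1) * stride + kernel;
    std::size_t pad   = total > in ? total - in : 0;
    return {pad / 2, pad - pad / 2}; // {pad_begin, pad_end}
}

int main()
{
    auto p = auto_pad_1d(7, 3, 2);            // input 7, kernel 3, stride 2
    std::cout << p[0] << " " << p[1] << "\n"; // prints "1 1"
}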
76 changes: 76 additions & 0 deletions src/onnx/broadcast_qdq.cpp
@@ -0,0 +1,76 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#include <migraphx/onnx/broadcast_qdq.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

// Preps a quantizelinear or dequantizelinear operation by broadcasting the
// weight-scale and zero-point inputs of a qlinearadd operator.
// Output: operator op_name (inputs: x, broadcast scale (float) & zero_pt (8-bit))
instruction_ref bcast_qdq_instr(const std::string& op_name,
instruction_ref x_in,
instruction_ref arg_fscale,
instruction_ref arg_z_pt,
const onnx_parser::node_info& info)
{
auto in_lens = x_in->get_shape().lens();

// Prep 1: broadcast the scale; it can come as a scalar or a 1-D tensor.
instruction_ref bcast_scale;
if(arg_fscale->get_shape().elements() > 1)
bcast_scale = info.add_instruction(
migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_fscale);
else
bcast_scale = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_fscale);

// Prep 2: broadcast the zero point; it can come as a scalar or a 1-D tensor.
instruction_ref bcast_zero_pt;
if(arg_z_pt->get_shape().elements() > 1)
bcast_zero_pt = info.add_instruction(
migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_z_pt);
else
bcast_zero_pt = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_z_pt);

// op_name is either quantizelinear or dequantizelinear:
return info.add_instruction(migraphx::make_op(op_name), x_in, bcast_scale, bcast_zero_pt);
}

// Multibroadcast a scalar.
instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
instruction_ref arg_in,
const onnx_parser::node_info& info)
{
auto bcast_instr_out = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", shape_out.lens()}}), arg_in);
return bcast_instr_out;
}

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
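For intuition about what the two branches above set up: dequantizelinear applies either one scale/zero-point to the whole tensor (the multibroadcast case) or one per slice along axis 0 (the broadcast case). A standalone sketch of the per-axis arithmetic, independent of MIGraphX, with illustrative values and shapes:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // Dequantize a 2x3 int8 tensor with one scale/zero-point per row
    // (axis 0), mirroring the "broadcast" branch; a single scalar
    // scale/zero-point would correspond to the "multibroadcast" branch.
    std::vector<int8_t> x       = {10, 20, 30, 40, 50, 60}; // row-major 2x3
    std::vector<float> scale    = {0.1f, 0.5f};             // per-row scales
    std::vector<int8_t> zero_pt = {0, 4};                   // per-row zero points

    for(std::size_t row = 0; row < 2; ++row)
        for(std::size_t col = 0; col < 3; ++col)
        {
            auto i   = row * 3 + col;
            float dq = (x[i] - zero_pt[row]) * scale[row]; // dequantized value
            std::cout << dq << (col == 2 ? "\n" : " ");
        }
    return 0;
}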
56 changes: 56 additions & 0 deletions src/onnx/include/migraphx/onnx/broadcast_qdq.hpp
@@ -0,0 +1,56 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP

#include <string>

#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

// Preps a quantizelinear or dequantizelinear operation by broadcasting the
// weight-scale and zero-point inputs of a qlinearadd operator.
// Output: operator op_name (inputs: x, broadcast scale (float) & zero_pt (8-bit))
instruction_ref bcast_qdq_instr(const std::string& op_name,
instruction_ref x_in,
instruction_ref arg_fscale,
instruction_ref arg_z_pt,
const onnx_parser::node_info& info);

// Multibroadcast a scalar.
instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
instruction_ref arg_in,
const onnx_parser::node_info& info);

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
3 changes: 1 addition & 2 deletions src/onnx/parse_depthtospace.cpp
@@ -87,8 +87,7 @@ struct parse_depthtospace : op_parser<parse_depthtospace>

auto temp1 = info.add_instruction(make_op("reshape", {{"dims", lens1}}), args[0]);
auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
return info.add_instruction(make_op("reshape", {{"dims", lens2}}),
info.make_contiguous(temp2));
return info.add_instruction(make_op("reshape", {{"dims", lens2}}), temp2);
}
};

154 changes: 154 additions & 0 deletions src/onnx/parse_qlinearadd.cpp
@@ -0,0 +1,154 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/common.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/onnx/checks.hpp>
#include <migraphx/onnx/broadcast_qdq.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

/*
*********************************************************************************
* Reference: see QLinearAdd in *
* https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md *
*********************************************************************************
com.microsoft.QLinearAdd
Performs element-wise binary addition on 8 bit data types (with Numpy-style broadcasting support).
C = (A_scale * (A - A_zero_point) + B_scale * (B - B_zero_point))/C_scale + C_zero_point
Version
This version of the operator has been available since version 1 of the 'com.microsoft' operator
set.
Inputs (7 - 8)
A : T
First operand.
A_scale : tensor(float)
Input A's scale. It's a scalar, which means a per-tensor/layer quantization.
A_zero_point (optional) : T
Input A zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.
B : T
Second operand.
B_scale : tensor(float)
Input B's scale. It's a scalar, which means a per-tensor/layer quantization.
B_zero_point (optional) : T
Input B zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.
C_scale : tensor(float)
Output scale. It's a scalar, which means a per-tensor/layer quantization.
C_zero_point (optional) : T
Output zero point. Default value is 0 if it's not specified. It's a scalar, which means a
per-tensor/layer quantization.
Outputs
C : T
Result, has same element type as two inputs
Type Constraints
T : tensor(uint8), tensor(int8)
Constrain input and output types to 8 bit signed and unsigned tensors.
*/

struct parse_qlinearadd : op_parser<parse_qlinearadd>
{
std::vector<op_desc> operators() const { return {{"QLinearAdd"}}; }

// Basic type checking for the QLinearAdd operator
void check_inputs(const std::vector<instruction_ref>& args) const
{
if(args.size() < 7)
MIGRAPHX_THROW("QLINEARADD: missing inputs");

const auto& in_a = args[0];
const auto& in_b = args[3];

auto sh_a = in_a->get_shape();
auto sh_b = in_b->get_shape();

auto type_a = sh_a.type();
auto type_b = sh_b.type();
if(type_a != migraphx::shape::int8_type and type_a != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARADD: unsupported input type");
if(type_b != migraphx::shape::int8_type and type_b != migraphx::shape::uint8_type)
MIGRAPHX_THROW("QLINEARADD: unsupported input type");
if(type_a != type_b)
MIGRAPHX_THROW("QLINEARADD: mismatched input types");
}

instruction_ref parse(const op_desc& /* opd */,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args);

// A
const auto& in_a = args[0];
const auto& in_scale_a = args[1];
const auto& in_zero_pt_a = args[2];

auto dquant_a = bcast_qdq_instr("dequantizelinear", in_a, in_scale_a, in_zero_pt_a, info);

// B
const auto& in_b = args[3];
const auto& in_scale_b = args[4];
const auto& in_zero_pt_b = args[5];
auto dquant_b = bcast_qdq_instr("dequantizelinear", in_b, in_scale_b, in_zero_pt_b, info);

// C = A + B
auto out_c = info.add_common_op("add", dquant_a, dquant_b);

const auto& in_scale_c = args[6];

// zero_pt for C is supplied as the last optional argument.
if(args.size() == 8)
return bcast_qdq_instr("quantizelinear", out_c, in_scale_c, args[7], info);

// if there is no zero_pt, just broadcast the scale.
auto bcast_scale_c = bcast_scalar_instr(out_c->get_shape(), in_scale_c, info);
return info.add_instruction(migraphx::make_op("quantizelinear"), out_c, bcast_scale_c);
}
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
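To make the reference formula concrete, here is a standalone sketch of the QLinearAdd arithmetic with illustrative scales and zero points, including int8 saturation on requantization. It is independent of MIGraphX and only demonstrates the math the parser above implements via dequantize, add, and quantize:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Saturating round-to-nearest requantization to int8.
int8_t quantize(float v, float scale, int8_t zero_pt)
{
    int q = static_cast<int>(std::lround(v / scale)) + zero_pt;
    return static_cast<int8_t>(std::clamp(q, -128, 127));
}

int main()
{
    // C = (A_scale*(A - A_zero) + B_scale*(B - B_zero)) / C_scale + C_zero
    int8_t a = 40, b = 60;
    float a_scale = 0.1f, b_scale = 0.05f, c_scale = 0.2f;
    int8_t a_zero = 0, b_zero = 10, c_zero = 0;

    float real = a_scale * (a - a_zero) + b_scale * (b - b_zero); // 4.0 + 2.5 = 6.5
    int8_t c   = quantize(real, c_scale, c_zero);                 // round(6.5/0.2) = 33

    std::cout << "C = " << static_cast<int>(c) << "\n"; // prints "C = 33"
    return 0;
}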
3 changes: 1 addition & 2 deletions src/onnx/parse_reshape.cpp
@@ -53,8 +53,7 @@ struct parse_reshape : op_parser<parse_reshape>
s.visit([&](auto v) { copy(v, std::back_inserter(dims)); });
}

auto cont = info.add_instruction(make_op("contiguous"), args[0]);
return info.add_instruction(make_op("reshape", {{"dims", dims}}), cont);
return info.add_instruction(make_op("reshape", {{"dims", dims}}), args[0]);
}
};

3 changes: 1 addition & 2 deletions src/onnx/parse_spacetodepth.cpp
@@ -73,8 +73,7 @@ struct parse_spacetodepth : op_parser<parse_spacetodepth>
std::vector<int64_t> perm = {0, 3, 5, 1, 2, 4};
auto temp1 = info.add_instruction(make_op("reshape", {{"dims", trans_lens}}), args[0]);
auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
return info.add_instruction(make_op("reshape", {{"dims", res_lens}}),
info.make_contiguous(temp2));
return info.add_instruction(make_op("reshape", {{"dims", res_lens}}), temp2);
}
};

9 changes: 5 additions & 4 deletions src/quantization.cpp
@@ -70,6 +70,10 @@ void quantize_int8(program& prog,
MIGRAPHX_THROW("QUANTIZE_INT8: only support DOT and CONVOLUTION operation");
}

// Run optimize_module() before converting to int8 so that constants are
// evaluated and folded in FP32, avoiding a loss of precision.
run_passes(prog, {optimize_module{}});

std::shared_ptr<std::vector<std::pair<float, float>>> int8_quant_params =
std::make_shared<std::vector<std::pair<float, float>>>();
std::shared_ptr<std::vector<float>> max_abs_vals = std::make_shared<std::vector<float>>();
@@ -143,10 +147,7 @@

run_passes(prog,
{quantize_int8_pass{ins_names, *int8_quant_params},
eliminate_common_subexpression{},
dead_code_elimination{},
simplify_reshapes{},
dead_code_elimination{},
optimize_module{},
simplify_qdq{},
dead_code_elimination{}});
}
6 changes: 5 additions & 1 deletion src/targets/gpu/device/targets.hpp.in
@@ -24,7 +24,7 @@
#ifndef MIGRAPHX_GUARD_DEVICE_TARGETS_CPP
#define MIGRAPHX_GUARD_DEVICE_TARGETS_CPP

#include <migraphx/config.hpp>
#include <migraphx/gpu/device/config.hpp>
#include <string>
#include <vector>

@@ -34,9 +34,13 @@ namespace gpu {
namespace device {
#define MIGRAPHX_GPU_TARGETS "@GPU_TARGETS@" // NOLINT

MIGRAPHX_DEVICE_EXPORT
const std::vector<std::string>& get_targets();

MIGRAPHX_DEVICE_EXPORT
std::string get_targets_as_string();

MIGRAPHX_DEVICE_EXPORT
std::string get_device_name();

} // namespace device