FP8 OCP to FP8 FNUZ on hardware with only FP8 FNUZ support #3684

Open: wants to merge 27 commits into base: develop

Changes from 21 commits

Commits (27):
70336db - Initial (CharlieL7, Oct 3, 2024)
b41c8b6 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into ocp_to_fnuz (CharlieL7, Nov 12, 2024)
bdebeb5 - progress (CharlieL7, Nov 15, 2024)
a1fb21e - cleanup (CharlieL7, Nov 22, 2024)
b8e2041 - remove unneeded files (CharlieL7, Nov 22, 2024)
8366434 - Fix bit_cast kernel (CharlieL7, Nov 22, 2024)
a15e5a4 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into bit_cast_op (CharlieL7, Nov 22, 2024)
be5d9a0 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into bit_cast_op (CharlieL7, Nov 22, 2024)
3e08ab2 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into bit_cast_op (CharlieL7, Nov 25, 2024)
7b40796 - Merge branch 'bit_cast_op' of github.com:ROCm/AMDMIGraphX into bit_ca… (CharlieL7, Nov 25, 2024)
697d459 - progress (CharlieL7, Nov 27, 2024)
4b6c8c1 - fix template for gpu bit_cast (CharlieL7, Nov 27, 2024)
531150f - Merge branch 'develop' into bit_cast_op (CharlieL7, Nov 27, 2024)
d53ac35 - Merge branch 'bit_cast_op' of github.com:ROCm/AMDMIGraphX into ocp_to… (CharlieL7, Nov 27, 2024)
95a3cd7 - first implementation (CharlieL7, Nov 27, 2024)
98d8760 - progress (CharlieL7, Dec 4, 2024)
7357367 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into ocp_to_fnuz (CharlieL7, Dec 4, 2024)
e3d84fc - Fixes and first test works (CharlieL7, Dec 4, 2024)
dac07c2 - formatting (CharlieL7, Dec 4, 2024)
06b94b8 - Added ref tests (CharlieL7, Dec 5, 2024)
3e5d3a8 - Merge branch 'develop' of github.com:ROCm/AMDMIGraphX into ocp_to_fnuz (CharlieL7, Dec 9, 2024)
df0202e - Cleanup (CharlieL7, Dec 10, 2024)
0a4d6bf - add verify test (CharlieL7, Dec 11, 2024)
c94c520 - Fix bug with __builtin_nan(string) (CharlieL7, Dec 13, 2024)
d025e47 - Merge branch 'develop' into ocp_to_fnuz (CharlieL7, Dec 13, 2024)
0cddfbf - separate quantizable ops (CharlieL7, Dec 13, 2024)
3c36b9b - Merge branch 'ocp_to_fnuz' of github.com:ROCmSoftwarePlatform/AMDMIGr… (CharlieL7, Dec 13, 2024)
1 change: 1 addition & 0 deletions src/CMakeLists.txt
@@ -57,6 +57,7 @@ add_library(migraphx
file_buffer.cpp
fileutils.cpp
fp_to_double.cpp
fp8_ocp_to_nanoo.cpp
fuse_concat.cpp
fuse_pointwise.cpp
fuse_pointwise_reduce.cpp
173 changes: 173 additions & 0 deletions src/fp8_ocp_to_nanoo.cpp
@@ -0,0 +1,173 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fp8_ocp_to_nanoo.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/qdq_helpers.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace {

using fp8::fp8e4m3fnuz;

struct match_fp8ocp_convert_to_fp8nanoo
{
auto matcher() const
{
auto dq1 =
match::arg(0)(skip_post_dq_ops(dequantizelinear_op("scale1", "zp1").bind("dq1")));
auto dq2 =
match::arg(1)(skip_post_dq_ops(dequantizelinear_op("scale2", "zp2").bind("dq2")));
return match::name(get_quantizable_op_names())(dq1, dq2);
}

static auto bit_cast_and_handle_specials(module& m,
const instruction_ref dq,
const instruction_ref x,
const instruction_ref bits_0x80_lit,
const instruction_ref bits_0x7f_lit,
const instruction_ref bits_0xff_lit,
const instruction_ref bits_0x00_lit)
{
auto x_lens = x->get_shape().lens();
auto cast_input = m.insert_instruction(
dq, make_op("bit_cast", {{"target_type", shape::fp8e4m3fnuz_type}}), x);
auto mb_bits_0x80_lit = m.insert_instruction(
dq, make_op("multibroadcast", {{"out_lens", x_lens}}), bits_0x80_lit);
auto mb_bits_0x7f_lit = m.insert_instruction(
dq, make_op("multibroadcast", {{"out_lens", x_lens}}), bits_0x7f_lit);
auto mb_bits_0xff_lit = m.insert_instruction(
dq, make_op("multibroadcast", {{"out_lens", x_lens}}), bits_0xff_lit);
auto mb_zero_lit = m.insert_instruction(
dq, make_op("multibroadcast", {{"out_lens", x_lens}}), bits_0x00_lit);
// negative zero in fp8e4m3fn to zero in fp8e4m3fnuz
// a == 0x80 ? 0x0 : a
auto is_neg_zero = m.insert_instruction(dq, make_op("equal"), cast_input, mb_bits_0x80_lit);
auto ret = m.insert_instruction(dq, make_op("where"), is_neg_zero, mb_zero_lit, cast_input);

// positive and negative NaN in fp8e4m3fn to NaN in fp8e4m3fnuz
// (a == 0x7f or a == 0xff) ? 0x80 : a
auto eq_0x7f = m.insert_instruction(dq, make_op("equal"), ret, mb_bits_0x7f_lit);

auto eq_0xff = m.insert_instruction(dq, make_op("equal"), ret, mb_bits_0xff_lit);

auto cond = m.insert_instruction(dq, make_op("logical_or"), eq_0x7f, eq_0xff);
ret = m.insert_instruction(dq, make_op("where"), cond, mb_bits_0x80_lit, ret);
return ret;
}

// Reapply the broadcast instructions that followed the original scales or
// zero points onto the adjusted scales and zero points. Similar to
// propagate_quantized_ins in simplify_qdq.
static auto propagate_broadcasts(module& m,
const instruction_ref adj,
const instruction_ref ori,
const instruction_ref start,
const instruction_ref insert_pt)
{
auto prev_ins = start;
std::vector<instruction_ref> ins_inbetween;

[GitHub Actions / misspell] src/fp8_ocp_to_nanoo.cpp:93: "inbetween" is a misspelling of "between"
// the matcher skips contiguous, multi/broadcasts and transposes; collect all those
// instructions
while(prev_ins != ori)
{
ins_inbetween.push_back(prev_ins);
prev_ins = prev_ins->inputs().front();
}
auto ret = adj;
for(auto ins : reverse_iterator_for(ins_inbetween))

[GitHub Actions / misspell] src/fp8_ocp_to_nanoo.cpp:102: "inbetween" is a misspelling of "between"
{
ret = m.insert_instruction(insert_pt, (*ins)->get_operator(), {ret});
}
return ret;
}

static auto cast_to_nanoo(module& m,
const instruction_ref dq,
const instruction_ref input,
const instruction_ref dq_scale,
const instruction_ref dq_zp)
{
auto x = input;
std::vector<fp8e4m3fnuz> bits_0x80 = {fp8e4m3fnuz(0x80, fp8e4m3fnuz::from_bits())};
auto bits_0x80_lit = m.add_literal(shape{shape::fp8e4m3fnuz_type, {1}, {0}}, bits_0x80);

std::vector<fp8e4m3fnuz> bits_0x7f = {fp8e4m3fnuz(0x7f, fp8e4m3fnuz::from_bits())};
auto bits_0x7f_lit = m.add_literal(shape{shape::fp8e4m3fnuz_type, {1}, {0}}, bits_0x7f);

std::vector<fp8e4m3fnuz> bits_0xff = {fp8e4m3fnuz(0xff, fp8e4m3fnuz::from_bits())};
auto bits_0xff_lit = m.add_literal(shape{shape::fp8e4m3fnuz_type, {1}, {0}}, bits_0xff);

std::vector<fp8e4m3fnuz> bits_0x00 = {fp8e4m3fnuz(0x00, fp8e4m3fnuz::from_bits())};
auto bits_0x00_lit = m.add_literal(shape{shape::fp8e4m3fnuz_type, {1}, {0}}, bits_0x00);

x = bit_cast_and_handle_specials(
m, dq, x, bits_0x80_lit, bits_0x7f_lit, bits_0xff_lit, bits_0x00_lit);
auto adj_dq_zp = bit_cast_and_handle_specials(
m, dq, dq_zp, bits_0x80_lit, bits_0x7f_lit, bits_0xff_lit, bits_0x00_lit);

// adj_scale = 2 * scale
auto two_lit = m.add_literal(literal{shape{dq_scale->get_shape().type()}, {2}});
two_lit = m.insert_instruction(
dq, make_op("multibroadcast", {{"out_lens", dq_scale->get_shape().lens()}}), two_lit);
auto adj_dq_scale = m.insert_instruction(dq, make_op("mul"), dq_scale, two_lit);

adj_dq_scale = propagate_broadcasts(m, adj_dq_scale, dq_scale, dq->inputs().at(1), dq);
adj_dq_zp = propagate_broadcasts(m, adj_dq_zp, dq_zp, dq->inputs().at(2), dq);
m.replace_instruction(dq, make_op("dequantizelinear"), x, adj_dq_scale, adj_dq_zp);
}

auto apply(module& m, const match::matcher_result& r) const
{
auto dq1 = r.instructions["dq1"];
auto dq2 = r.instructions["dq2"];
auto scale1 = r.instructions["scale1"];
auto scale2 = r.instructions["scale2"];
auto zp1 = r.instructions["zp1"];
auto zp2 = r.instructions["zp2"];

std::set<migraphx::shape::type_t> supported_types = {migraphx::shape::fp8e4m3fn_type};
if(not contains(supported_types, dq1->inputs().front()->get_shape().type()) or
not contains(supported_types, dq2->inputs().front()->get_shape().type()))
return;

cast_to_nanoo(m, dq1, dq1->inputs().front(), scale1, zp1);
cast_to_nanoo(m, dq2, dq2->inputs().front(), scale2, zp2);
return;

[GitHub Actions / tidy] src/fp8_ocp_to_nanoo.cpp:160: redundant return statement at the end of a function with a void return type [readability-redundant-control-flow]
}
};

} // namespace

void fp8_ocp_to_nanoo::apply(module_pass_manager& mpm) const
{
module_ref mm = &mpm.get_module();
match::find_matches(*mm, match_fp8ocp_convert_to_fp8nanoo{});
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
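For reference, the special-value handling in bit_cast_and_handle_specials amounts to a one-byte remap. A minimal standalone sketch (a hypothetical helper, not part of this PR) of the rule the equal/where instructions apply elementwise:

```cpp
#include <cstdint>

// Scalar equivalent of the remap done above on whole tensors:
// reinterpret an fp8e4m3fn (OCP) byte as fp8e4m3fnuz (NANOO) and patch the
// encodings whose meaning differs between the two formats.
std::uint8_t ocp_bits_to_fnuz_bits(std::uint8_t a)
{
    if(a == 0x80)              // OCP negative zero -> FNUZ zero
        return 0x00;
    if(a == 0x7f or a == 0xff) // OCP +/-NaN -> FNUZ NaN (0x80)
        return 0x80;
    return a;                  // every other bit pattern is kept as-is
}
```

Every bit pattern that is kept as-is reads as half its OCP value under the FNUZ interpretation, which is why cast_to_nanoo multiplies the dequantization scale by two.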
48 changes: 48 additions & 0 deletions src/include/migraphx/fp8_ocp_to_nanoo.hpp
@@ -0,0 +1,48 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_FP8_OCP_TO_NANOO_HPP
#define MIGRAPHX_GUARD_RTGLIB_FP8_OCP_TO_NANOO_HPP

#include <migraphx/config.hpp>
#include <migraphx/pass_manager.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

/**
* Convert fp8e4m3fn to fp8e4m3fnuz for hardware that only supports fp8e4m3fnuz data types intrinsically.
* The conversion keeps the same bit representation and compensates by adjusting the scaling factors at dequantization.
* Reinterpreting an fp8e4m3fn bit pattern as fp8e4m3fnuz halves the represented value, so the dequantization scales are doubled.
* This pass should run before simplify_qdq so that the scales and zero points calculated by simplify_qdq use the adjusted scaling factors.
*/
struct MIGRAPHX_EXPORT fp8_ocp_to_nanoo
{
std::string name() const { return "fp8_ocp_to_nanoo"; }
void apply(module_pass_manager& mpm) const;
};

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
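The scale doubling described in the doc comment above follows from the exponent bias difference between the two formats (7 for fp8e4m3fn, 8 for fp8e4m3fnuz): any finite, non-special bit pattern decodes to exactly half the value when read as FNUZ. A throwaway check over all encodings (illustration only; decode_e4m3 is a hypothetical helper, not MIGraphX API):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Decode an e4m3 byte: bias = 7 reads it as fp8e4m3fn (OCP), bias = 8 as
// fp8e4m3fnuz (NANOO). Special encodings are skipped in the loop below.
double decode_e4m3(std::uint8_t bits, int bias)
{
    int sign = (bits >> 7) & 1;
    int exp  = (bits >> 3) & 0xf;
    int man  = bits & 0x7;
    double mag = (exp == 0) ? std::ldexp(man / 8.0, 1 - bias)          // subnormal
                            : std::ldexp(1.0 + man / 8.0, exp - bias); // normal
    return sign == 1 ? -mag : mag;
}

int main()
{
    for(int b = 0; b < 256; ++b)
    {
        auto bits = static_cast<std::uint8_t>(b);
        if(bits == 0x80 or (bits & 0x7f) == 0x7f) // specials handled by the pass
            continue;
        // Same bits, half the value: dequantizelinear can reuse them with 2 * scale.
        if(decode_e4m3(bits, 8) * 2 != decode_e4m3(bits, 7))
            std::printf("mismatch at 0x%02x\n", b);
    }
    return 0;
}
```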
1 change: 1 addition & 0 deletions src/include/migraphx/op/bit_cast.hpp
@@ -80,6 +80,7 @@ struct bit_cast : unary<bit_cast>
args[0].visit([&](auto input) {
using itype = typename decltype(input)::value_type;
if constexpr(sizeof(otype) == sizeof(itype))

{
par_transform(input.begin(), input.end(), output.begin(), [&](auto x) {
return migraphx::bit_cast<otype>(x);
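For context, bit_cast here reinterprets the object representation between equal-sized types, which is what the sizeof(otype) == sizeof(itype) guard above protects. A rough sketch of the idea (MIGraphX's own implementation may differ; C++20 provides std::bit_cast with these semantics):

```cpp
#include <cstring>
#include <type_traits>

// memcpy-based bit cast for trivially copyable types of equal size.
template <class To, class From>
To bit_cast_sketch(const From& from)
{
    static_assert(sizeof(To) == sizeof(From), "types must have the same size");
    static_assert(std::is_trivially_copyable<From>{} and std::is_trivially_copyable<To>{},
                  "types must be trivially copyable");
    To to;
    std::memcpy(&to, &from, sizeof(To));
    return to;
}
```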
60 changes: 60 additions & 0 deletions src/include/migraphx/qdq_helpers.hpp
@@ -0,0 +1,60 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#ifndef MIGRAPHX_GUARD_RTGLIB_QDQ_HELPERS_HPP
#define MIGRAPHX_GUARD_RTGLIB_QDQ_HELPERS_HPP

#include <migraphx/config.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/make_op.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

namespace {

[GitHub Actions / tidy] src/include/migraphx/qdq_helpers.hpp:35: do not use unnamed namespaces in header files [cert-dcl59-cpp,google-build-namespaces]

template <class... Ms>
auto skip_post_dq_ops(Ms... ms)
{
return match::skip(match::name(
"broadcast", "multibroadcast", "contiguous", "transpose", "reshape", "convert"))(ms...);
}

static std::unordered_set<std::string> get_quantizable_op_names()

[GitHub Actions / tidy] src/include/migraphx/qdq_helpers.hpp:44: 'get_quantizable_op_names' is a static definition in anonymous namespace; static is redundant [readability-static-definition-in-anonymous-namespace]
{
static std::unordered_set<std::string> s = {"convolution", "dot"};
return s;
}

static auto dequantizelinear_op(const std::string& scale, const std::string& zp)

[GitHub Actions / tidy] src/include/migraphx/qdq_helpers.hpp:50: 'dequantizelinear_op' is a static definition in anonymous namespace; static is redundant [readability-static-definition-in-anonymous-namespace]
{
return match::name("dequantizelinear")(
match::arg(1)(match::skip_broadcasts(match::is_constant().bind(scale))),
match::arg(2)(match::skip_broadcasts(match::is_constant().bind(zp))));
}
} // namespace
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
24 changes: 3 additions & 21 deletions src/simplify_qdq.cpp
@@ -36,24 +36,12 @@
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/register_op.hpp>
#include <migraphx/fp8_types.hpp>
#include <migraphx/qdq_helpers.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace {

template <class... Ms>
auto skip_post_dq_ops(Ms... ms)
{
return match::skip(match::name(
"broadcast", "multibroadcast", "contiguous", "transpose", "reshape", "convert"))(ms...);
}

std::unordered_set<std::string> get_quantizable_op_names()
{
static std::unordered_set<std::string> s = {"convolution", "dot"};
return s;
}

struct match_find_quantizable_ops
{
static bool
@@ -117,14 +105,6 @@ struct match_find_quantizable_ops
return qinp;
}

static auto dequantizelinear_op(const std::string& scale, const std::string& zp)
{
return match::name("dequantizelinear")(
match::arg(0)(match::skip(match::name("quantizelinear"))(match::any())),
match::arg(1)(match::skip_broadcasts(match::is_constant().bind(scale))),
match::arg(2)(match::skip_broadcasts(match::is_constant().bind(zp))));
}

auto matcher() const
{
auto dq1 =
@@ -231,7 +211,9 @@ struct match_find_quantizable_ops
is_valid_qparam(zp1, out_lens, out_lens.size() - 2) and
is_valid_qparam(scale2, out_lens, out_lens.size() - 1) and
is_valid_qparam(zp2, out_lens, out_lens.size() - 1)))
{
return;
}

// This implementation supports both arguments being per-axis affine quantized
// In practice, inputs are per-tensor affine and weights are per-axis symmetric
3 changes: 3 additions & 0 deletions src/targets/gpu/target.cpp
@@ -31,6 +31,7 @@
#include <migraphx/eliminate_data_type.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/fp8_ocp_to_nanoo.hpp>
#include <migraphx/fuse_concat.hpp>
#include <migraphx/fuse_pointwise_reduce.hpp>
#include <migraphx/inline_module.hpp>
@@ -179,6 +180,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
dead_code_elimination{},
eliminate_identity{},
dead_code_elimination{},
enable_pass(not gpu::gfx_has_fp8ocp_intrinsics() and gpu::gfx_has_fp8fnuz_intrinsics(), fp8_ocp_to_nanoo{}),
enable_pass(not gpu::gfx_has_fp8ocp_intrinsics() and gpu::gfx_has_fp8fnuz_intrinsics(), dead_code_elimination{}),
simplify_qdq{},
enable_pass(not mlir_enabled(), rewrite_quantization{}),
dead_code_elimination{},
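With the enable_pass guards above, the conversion only runs on targets that expose FNUZ FP8 intrinsics but not OCP FP8 intrinsics, and it is followed by dead code elimination before simplify_qdq. A sketch of exercising the pass in isolation (assuming the usual run_passes test helper; the PR's actual tests are not shown in this hunk):

```cpp
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/fp8_ocp_to_nanoo.hpp>
#include <migraphx/module.hpp>
#include <migraphx/pass_manager.hpp>

// Apply the new pass plus dead code elimination to a module, mirroring the
// order target.cpp uses ahead of simplify_qdq.
static void run_ocp_to_nanoo(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::fp8_ocp_to_nanoo{}, migraphx::dead_code_elimination{}});
}
```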