From 5cc8cb10f7cfa86394391c464dbaac71ac58344a Mon Sep 17 00:00:00 2001 From: Soren Lassen Date: Sun, 17 Sep 2023 12:04:12 -0700 Subject: [PATCH 1/3] grew windows CI timeout (#2511) Signed-off-by: Soren Lassen --- .azure-pipelines/Windows-CI.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/Windows-CI.yml b/.azure-pipelines/Windows-CI.yml index aabb98e8d4..8303fee5aa 100644 --- a/.azure-pipelines/Windows-CI.yml +++ b/.azure-pipelines/Windows-CI.yml @@ -11,7 +11,8 @@ parameters: jobs: - job: Build_onnx_mlir_Windows - timeoutInMinutes: 240 + # 4h timeout is sometimes a tiny bit short when llvm-project is rebuilt + timeoutInMinutes: 270 pool: vmImage: 'windows-2019' From de095c0c8fc94197e3215306aa099eaca4b11af7 Mon Sep 17 00:00:00 2001 From: Philip Lassen Date: Sun, 17 Sep 2023 13:32:43 -0700 Subject: [PATCH 2/3] Update CMakeLists.txt to add OpenMPDialect (#2514) Signed-off-by: Philip Lassen --- src/Compiler/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Compiler/CMakeLists.txt b/src/Compiler/CMakeLists.txt index 1b086237be..e75a55dd94 100644 --- a/src/Compiler/CMakeLists.txt +++ b/src/Compiler/CMakeLists.txt @@ -68,6 +68,7 @@ add_onnx_mlir_library(OMCompilerDialects OMKrnlOps OMONNXOps MLIRIR + MLIROpenMPDialect ) add_onnx_mlir_library(OMCompilerPasses From 5aca4540b90d6db4700630f3a92fb75d451eaa79 Mon Sep 17 00:00:00 2001 From: Soren Lassen Date: Sun, 17 Sep 2023 15:04:40 -0700 Subject: [PATCH 3/3] fix f16 onnx.BatchNormalizationInferenceMode canonicalization (#2512) Signed-off-by: Soren Lassen --- src/Dialect/ONNX/ONNXOps/OpHelper.cpp | 6 +++++- test/mlir/onnx/onnx_canonicalization.mlir | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/Dialect/ONNX/ONNXOps/OpHelper.cpp b/src/Dialect/ONNX/ONNXOps/OpHelper.cpp index 809a238b48..45ee7f8449 100644 --- a/src/Dialect/ONNX/ONNXOps/OpHelper.cpp +++ b/src/Dialect/ONNX/ONNXOps/OpHelper.cpp @@ -411,7 +411,11 @@ ArrayAttr 
createArrayAttrFromConstantOp(ONNXConstantOp constOp) { DenseElementsAttr createDenseElementsAttrFromFloatAttr( PatternRewriter &rewriter, Type elementType, FloatAttr attr) { auto tensorType = RankedTensorType::get({1}, elementType); - return DenseElementsAttr::get(tensorType, {attr.getValue()}); + auto ftype = cast<FloatType>(elementType); + APFloat f = attr.getValue(); + bool ignored; + f.convert(ftype.getFloatSemantics(), APFloat::rmNearestTiesToEven, &ignored); + return DenseElementsAttr::get(tensorType, {f}); } //===----------------------------------------------------------------------===// diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir index 87c8f2a853..f58fe49142 100644 --- a/test/mlir/onnx/onnx_canonicalization.mlir +++ b/test/mlir/onnx/onnx_canonicalization.mlir @@ -697,6 +697,26 @@ func.func @test_rewrite_batchnormtestmode_1d(%arg0 : tensor<64xf32>, %scale : te // ----- +func.func @test_rewrite_batchnormtestmode_1d_f16(%arg0 : tensor<64xf16>, %scale : tensor<1xf32>, %bias : tensor<1xf32>, %mean : tensor<1xf32>, %var : tensor<1xf32>) -> tensor<64xf16> { + %0 = "onnx.BatchNormalizationInferenceMode"(%arg0, %scale, %bias, %mean, %var) {epsilon = 1.00000007E-5 : f32} : (tensor<64xf16>, tensor<1xf32>, tensor<1xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<64xf16> + onnx.Return %0 : tensor<64xf16> + +// CHECK-LABEL: func.func @test_rewrite_batchnormtestmode_1d_f16 +// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<64xf16>, [[PARAM_1_:%.+]]: tensor<1xf32>, [[PARAM_2_:%.+]]: tensor<1xf32>, [[PARAM_3_:%.+]]: tensor<1xf32>, [[PARAM_4_:%.+]]: tensor<1xf32>) -> tensor<64xf16> { +// CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<1.001360e-05> : tensor<1xf16> +// CHECK: [[VAR_1_:%.+]] = "onnx.Add"([[PARAM_4_]], [[VAR_0_]]) : (tensor<1xf32>, tensor<1xf16>) -> tensor<*xf32> +// CHECK: [[VAR_2_:%.+]] = "onnx.Sqrt"([[VAR_1_]]) : (tensor<*xf32>) -> tensor<*xf32> +// CHECK: [[VAR_3_:%.+]] = "onnx.Div"([[PARAM_1_]], [[VAR_2_]]) : (tensor<1xf32>,
tensor<*xf32>) -> tensor<*xf32> +// CHECK-DAG: [[VAR_4_:%.+]] = "onnx.Mul"([[PARAM_0_]], [[VAR_3_]]) : (tensor<64xf16>, tensor<*xf32>) -> tensor<*xf16> +// CHECK-DAG: [[VAR_5_:%.+]] = "onnx.Mul"([[PARAM_3_]], [[VAR_3_]]) : (tensor<1xf32>, tensor<*xf32>) -> tensor<*xf32> +// CHECK: [[VAR_6_:%.+]] = "onnx.Sub"([[PARAM_2_]], [[VAR_5_]]) : (tensor<1xf32>, tensor<*xf32>) -> tensor<*xf32> +// CHECK: [[VAR_7_:%.+]] = "onnx.Add"([[VAR_4_]], [[VAR_6_]]) : (tensor<*xf16>, tensor<*xf32>) -> tensor<64xf16> +// CHECK: onnx.Return [[VAR_7_]] : tensor<64xf16> +// CHECK: } +} + +// ----- + func.func @test_normalize_add(%arg0 : tensor<2xf32>) -> tensor<2xf32> { %cst = "onnx.NoValue"() {value} : () -> none %0 = onnx.Constant dense<[0.0, 1.0]> : tensor<2xf32>