Update IREE onnx import to be in sync with Torch-MLIR #17476

Merged 8 commits on May 23, 2024

Changes from 3 commits
@@ -15,8 +15,11 @@
     python -m iree.compiler.tools.import_onnx ...
 """
 import argparse
+import os
 from pathlib import Path
+import shutil
 import sys
+import tempfile
 
 try:
     import onnx
@@ -38,8 +41,8 @@
 )
 
 
-def main(args):
-    model_proto = load_onnx_model(args.input_file)
+def main(args: argparse.Namespace):
+    model_proto = load_onnx_model(args)
     context = Context()
     model_info = onnx_importer.ModelInfo(model_proto)
     m = model_info.create_module(context=context).operation
@@ -58,13 +61,64 @@ def main(args):
     print(m.get_asm(assume_verified=not args.no_verify))
 
 
-def load_onnx_model(file_path: Path) -> onnx.ModelProto:
-    raw_model = onnx.load(file_path)
-    inferred_model = onnx.shape_inference.infer_shapes(raw_model)
+def load_onnx_model(args: argparse.Namespace) -> onnx.ModelProto:
+    # Do shape inference two ways. First, attempt in-memory to avoid redundant
+    # loading and the need for writing a temporary file somewhere. If that
+    # fails, typically because of the 2 GB protobuf size limit, try again via
+    # files. See
+    # https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md#shape-inference-a-large-onnx-model-2gb
+    # for details about the file-based technique.
+
+    # Make a temp dir for all the temp files we'll be generating as a side
+    # effect of inferring shapes. For now, the only file is a new .onnx holding
+    # the revised model with shapes.
+    #
+    # TODO: If the program temp_dir is None, we should be using an ephemeral
+    # temp directory instead of a hard-coded path in order to avoid data races
+    # by default.
+    input_dir = os.path.dirname(os.path.abspath(args.input_file))
+    temp_dir = tempfile.TemporaryDirectory(dir=input_dir)
+    temp_dir_path = Path(temp_dir.name)
+
+    # Load the model, with possible external data coming from the default
+    # location, or the location specified on the command line.
+    if args.data_dir is None:
+        raw_model = onnx.load(args.input_file)
+    else:
+        raw_model = onnx.load(args.input_file, load_external_data=False)
+        onnx.load_external_data_for_model(raw_model, args.data_dir)
+
+    # Run the checker to test whether the file is above the threshold for
+    # in-memory shape inference. If not, go ahead and do the shape inference.
+    try:
+        onnx.checker.check_model(raw_model)
+        inferred_model = onnx.shape_inference.infer_shapes(
+            raw_model, data_prop=args.data_prop
+        )
+        return inferred_model
+    except ValueError:
+        pass
Review comment on lines +81 to +90:
I saw 159 new test import failures on Windows after pulling this code in.

D:\dev\projects\SHARK-TestSuite\iree_tests (main -> upstream)
(nightly_pip.venv) λ iree-import-onnx D:\dev\projects\SHARK-TestSuite\third_party\onnx\onnx\backend\test\data\node\test_cast_DOUBLE_to_FLOAT\model.onnx -o D:\dev\projects\iree-tmp\double_to_float.mlir

Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "D:\dev\projects\SHARK-TestSuite\iree_tests\nightly_pip.venv\Scripts\iree-import-onnx.exe\__main__.py", line 7, in <module>
  File "D:\dev\projects\SHARK-TestSuite\iree_tests\nightly_pip.venv\Lib\site-packages\iree\compiler\tools\import_onnx\__main__.py", line 140, in _cli_main
    sys.exit(main(parse_arguments()))
             ^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\dev\projects\SHARK-TestSuite\iree_tests\nightly_pip.venv\Lib\site-packages\iree\compiler\tools\import_onnx\__main__.py", line 44, in main
    model_proto = load_onnx_model(args)
                  ^^^^^^^^^^^^^^^^^^^^^
  File "D:\dev\projects\SHARK-TestSuite\iree_tests\nightly_pip.venv\Lib\site-packages\iree\compiler\tools\import_onnx\__main__.py", line 84, in load_onnx_model
    onnx.checker.check_model(raw_model)
  File "D:\dev\projects\SHARK-TestSuite\iree_tests\nightly_pip.venv\Lib\site-packages\onnx\checker.py", line 148, in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
onnx.onnx_cpp2py_export.checker.ValidationError: Your model ir_version 10 is higher than the checker's (9).

Updating my onnx Python package from 1.15.0 to 1.16.1 helped. We should update these version pins or make this code tolerate different onnx versions:

"onnx>=1.15.0",
pip install onnx>=1.15.0

With the newer onnx Python package, there are now 2 new import successes and 0 new import failures.
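
One shape that version tolerance could take (a hypothetical sketch, not code from this PR; check_model_tolerant is an invented name): catch the checker's ValidationError alongside the ValueError that already routes oversized models to the file-based path, so version skew falls back to file-based inference instead of crashing.

import onnx

def check_model_tolerant(model: onnx.ModelProto) -> bool:
    # Hypothetical helper: True when in-memory shape inference is safe to
    # attempt, False when the caller should use the file-based fallback.
    try:
        onnx.checker.check_model(model)
        return True
    except ValueError:
        # Typically the 2 GB protobuf size limit.
        return False
    except onnx.checker.ValidationError:
        # e.g. "Your model ir_version 10 is higher than the checker's (9)"
        # from an older onnx package: fall back rather than abort.
        return False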


+    # Model is too big for in-memory inference: do file-based shape inference
+    # to a temp file.
+    temp_inferred_file = temp_dir_path / "temp-inferred.onnx"
+    onnx.shape_inference.infer_shapes_path(
+        args.input_file, temp_inferred_file, data_prop=args.data_prop
+    )
+
+    # Load the temp file and the external data.
+    inferred_model = onnx.load(temp_inferred_file, load_external_data=False)
+    data_dir = Path(input_dir if args.data_dir is None else args.data_dir)
+    onnx.load_external_data_for_model(inferred_model, data_dir)
+
+    # Remove the inferred shape file unless asked to keep it.
+    if not args.keep_temps:
+        temp_dir.cleanup()
+
     return inferred_model
 
 
-def parse_arguments(argv=None):
+def parse_arguments(argv=None) -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="IREE ONNX import tool")
     parser.add_argument("input_file", help="ONNX protobuf input", type=Path)
     parser.add_argument(
@@ -75,6 +129,30 @@ def parse_arguments(argv=None):
action="store_true",
help="Disable verification prior to printing",
)
parser.add_argument(
"--data-prop",
dest="data_prop",
saienduri marked this conversation as resolved.
Show resolved Hide resolved
default=True,
action=argparse.BooleanOptionalAction,
help="Toggle data propogation for onnx shape inference",
)
parser.add_argument(
"--keep-temps", action="store_true", help="Keep intermediate files"
)
parser.add_argument(
"--temp-dir",
help="Pre-existing directory in which to create temporary files."
' For example, to place temporaries under the directory "foo/bar",'
' specify --temp-dir=foo/bar. "foo/bar" must already exist.'
" Defaults to the directory of the input file.",
type=Path,
)
saienduri marked this conversation as resolved.
Show resolved Hide resolved
parser.add_argument(
"--data-dir",
help="Path to the base directory of the data."
" Defaults to the directory of the input file.",
type=Path,
)
args = parser.parse_args(argv)
return args

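
For reference, a sketch of the importer invoked with the new flags (model.onnx and the directories are hypothetical examples; --no-data-prop is the negative form that argparse.BooleanOptionalAction generates automatically for --data-prop):

python -m iree.compiler.tools.import_onnx model.onnx -o model.mlir \
    --data-dir ./weights --temp-dir ./tmp --keep-temps

python -m iree.compiler.tools.import_onnx model.onnx -o model.mlir --no-data-prop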