From 7eb49ffea93a910caa042cb5161bbf6b9e585d68 Mon Sep 17 00:00:00 2001 From: Kevin Chen <45886021+kevinch-nv@users.noreply.github.com> Date: Tue, 20 Oct 2020 13:50:16 -0700 Subject: [PATCH] Merge master changes -> 7.1 (#547) * Prefix logging messages with [TRT] (#497) * Add local build instructions (#498) * fix duplicate layer names bug (#446) (#467) Suppose we have a network with (not all distinct) layer names layer layer_1 layer When ImporterContext sees "layer", it sees it's not in mLayerNameCounts, and sets mLayerNameCounts["layer"] = 1 and adds a TRT layer with name "layer". It then sees "layer_1", concludes it's not in mLayerNameCounts, so it sets mLayerNameCounts["layer_1"] = 1 and adds a TRT layer with name "layer_1". NOW when it sees "layer", it sees that mLayerNameCounts["layer"] == 1, so we produce a "uniqueName" of "layer" + "_" + std::to_string(mLayerNameCounts["layer"] ), ie "layer_1", which is a name conflict for the TRT net. This change keeps track of all inserted names in a set and in the case of duplicates, tries suffix-appended modifications of the duplicated name by ever increasing integers until a name appears which has not been used. 
* Support Dynamic and 3D instanceNormalization (#515) * Add restrictions on multi-input convolutions (#521) * Update resize limitations in TensorRT (#538) Signed-off-by: Kevin Chen Co-authored-by: Thomas Peters --- ImporterContext.hpp | 53 +++++++++++++++++++++++------------ ModelImporter.cpp | 4 +-- README.md | 9 ++++-- builtin_op_importers.cpp | 60 ++++++++++++++++++++++++++++++++-------- onnx2trt_utils.hpp | 2 +- 5 files changed, 95 insertions(+), 33 deletions(-) diff --git a/ImporterContext.hpp b/ImporterContext.hpp index 9c803114..d30cf3b9 100644 --- a/ImporterContext.hpp +++ b/ImporterContext.hpp @@ -26,11 +26,12 @@ #include "onnx2trt_utils.hpp" #include +#include +#include #include namespace onnx2trt { - class ImporterContext final : public IImporterContext { nvinfer1::INetworkDefinition* _network; @@ -44,13 +45,17 @@ class ImporterContext final : public IImporterContext StringMap mTensorRangeMins; StringMap mTensorRangeMaxes; StringMap mLayerPrecisions; - StringMap - mTensorNameCounts; // Keep track of how many times a tensor name shows up, to avoid duplicate naming in TRT. - StringMap - mLayerNameCounts; // Keep track of how many times a tensor name shows up, to avoid duplicate naming in TRT. - std::unordered_set mUnsupportedShapeTensors; // Container to hold output tensor names of layers that produce shape tensor outputs but do not natively support them. - StringMap mLoopTensors; // Container to map subgraph tensors to their original outer graph names. - std::string mOnnxFileLocation; // Keep track of the directory of the parsed ONNX file + std::set mTensorNames; // keep track of tensor names used so far, + // to avoid duplicate naming in TRT. + std::set mLayerNames; // keep track of layer names used so far, + // to avoid duplicate naming in TRT. + int64_t mSuffixCounter = 0; // increasing suffix counter used to uniquify layer names. 
+ std::unordered_set mUnsupportedShapeTensors; // Container to hold any shape tensors that are + // the output of layers that do not support + // shape tensors. + StringMap mLoopTensors; // Container to map subgraph tensors to + // their original outer graph names. + std::string mOnnxFileLocation; // Keep track of the directory of the parsed ONNX file public: ImporterContext(nvinfer1::INetworkDefinition* network, nvinfer1::ILogger* logger) @@ -98,13 +103,12 @@ class ImporterContext final : public IImporterContext { return mOnnxFileLocation; } - // This actually handles weights as well, but is named this way to be consistent with the tensors() + // This actually handles weights as well, but is named this way to be + // consistent with the tensors() virtual void registerTensor(TensorOrWeights tensor, const std::string& basename) override { // TRT requires unique tensor names. - const std::string uniqueName - = mTensorNameCounts[basename] ? (basename + "_" + std::to_string(mTensorNameCounts[basename])) : basename; - ++mTensorNameCounts[basename]; + const std::string uniqueName = generateUniqueName(mTensorNames, basename); if (tensor) { @@ -122,8 +126,9 @@ class ImporterContext final : public IImporterContext convertINT64(reinterpret_cast(weights.values), weights.shape, ctx), weights.shape}; } } - // Overwrite previous tensors registered with the same name (this only happens when there are subgraphs, - // and in that case, overwriting is the desired behavior). + // Overwrite previous tensors registered with the same name (this only + // happens when there are subgraphs, and in that case, overwriting is the + // desired behavior). this->tensors()[basename] = std::move(tensor); } @@ -133,9 +138,7 @@ class ImporterContext final : public IImporterContext if (layer) { const std::string name = basename.empty() ? layer->getName() : basename; - const std::string uniqueName - = mLayerNameCounts[name] ? 
(name + "_" + std::to_string(mLayerNameCounts[name])) : name; - ++mLayerNameCounts[name]; + const std::string uniqueName = generateUniqueName(mLayerNames, basename); auto* ctx = this; // To enable logging. LOG_VERBOSE("Registering layer: " << name << " for ONNX node: " << basename); @@ -225,6 +228,22 @@ class ImporterContext final : public IImporterContext return _opsets.at(domain); } } + +private: + std::string generateUniqueName(std::set& namesSet, const std::string& basename) + { + std::string candidate = basename; + + while (namesSet.find(candidate) != namesSet.end()) + { + candidate = basename + "_" + std::to_string(mSuffixCounter); + ++mSuffixCounter; + } + + namesSet.insert(candidate); + + return candidate; + } }; } // namespace onnx2trt diff --git a/ModelImporter.cpp b/ModelImporter.cpp index 797ae801..170f26a7 100644 --- a/ModelImporter.cpp +++ b/ModelImporter.cpp @@ -374,7 +374,7 @@ bool ModelImporter::supportsModel( std::vector topological_order; if (!toposort(model.graph().node(), &topological_order)) { - cout << "Failed to sort model topologically, exiting ..." << endl; + LOG_ERROR("Failed to sort model topologically, exiting ..."); return false; } @@ -408,7 +408,7 @@ bool ModelImporter::supportsModel( } else { - std::cout << "Found unsupported node: " << tensorName << std::endl; + LOG_WARNING("Found unsupported node: " << tensorName); // This is not a supported node, reset newSubGraph newSubGraph = true; allSupported = false; diff --git a/README.md b/README.md index 18545503..ddf17c37 100644 --- a/README.md +++ b/README.md @@ -43,10 +43,15 @@ Current supported ONNX operators are found in the [operator support matrix](oper ### Building -For building on master, we recommend following the instructions on the [master branch of TensorRT](https://github.com/NVIDIA/TensorRT/) as there are new dependencies that were introduced to support these new features. 
+For building on master, we recommend following the instructions on the [master branch of TensorRT](https://github.com/NVIDIA/TensorRT/) to take advantage of the latest plugin code required for importing certain operators. -To build on older branches refer to their respective READMEs. +To build only the ONNX-TensorRT parser, follow these steps: cd onnx-tensorrt mkdir build && cd build cmake .. -DTENSORRT_ROOT=<path_to_trt> && make -j // Ensure that you update your LD_LIBRARY_PATH to pick up the location of the newly built library: export LD_LIBRARY_PATH=$PWD:$LD_LIBRARY_PATH ## Executable usage diff --git a/builtin_op_importers.cpp b/builtin_op_importers.cpp index 68a8ae34..7cc17b59 100644 --- a/builtin_op_importers.cpp +++ b/builtin_op_importers.cpp @@ -430,7 +430,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Conv) ASSERT(inputs.at(0).is_tensor(), ErrorCode::kUNSUPPORTED_NODE); if (inputs.at(1).is_tensor()) { - ASSERT(inputs.at(1).is_tensor(), ErrorCode::kUNSUPPORTED_NODE); + ASSERT(ctx->network()->hasExplicitPrecision() && "TensorRT only supports multi-input conv for explicit precision QAT networks!", ErrorCode::kUNSUPPORTED_NODE); if (inputs.size() == 3) { ASSERT(inputs.at(2).is_weights(), ErrorCode::kUNSUPPORTED_NODE); @@ -1613,7 +1613,8 @@ DEFINE_BUILTIN_OP_IMPORTER(InstanceNormalization) ASSERT(inputs.at(1).is_weights(), ErrorCode::kUNSUPPORTED_NODE); ASSERT(inputs.at(2).is_weights(), ErrorCode::kUNSUPPORTED_NODE); nvinfer1::ITensor* tensorPtr = &convertToTensor(inputs.at(0), ctx); - ASSERT(!isDynamic(tensorPtr->getDimensions()) && "InstanceNormalization does not support dynamic inputs!", + int nbDims = tensorPtr->getDimensions().nbDims; + ASSERT(nbDims >= 3 && nbDims <= 4 && "TensorRT only supports InstanceNormalization on 3D or 4D tensors!", ErrorCode::kUNSUPPORTED_NODE); auto scale_weights = inputs.at(1).weights(); auto bias_weights = inputs.at(2).weights(); @@ -2535,9 +2536,11 @@ DEFINE_BUILTIN_OP_IMPORTER(Resize) { nvinfer1::ITensor& input =
convertToTensor(inputs.at(0), ctx); // TRT does not support INT32 nor BOOL input types for this node - ASSERT(input.getType() != nvinfer1::DataType::kINT32 && input.getType() != nvinfer1::DataType::kBOOL, ErrorCode::kUNSUPPORTED_NODE); + ASSERT( (input.getType() != nvinfer1::DataType::kINT32 + && input.getType() != nvinfer1::DataType::kBOOL) + && "This version of TensorRT does not support INT32 or BOOL input for the Resize operator.", ErrorCode::kUNSUPPORTED_NODE); int inputRank = input.getDimensions().nbDims; - ASSERT(inputRank > 0, ErrorCode::kUNSUPPORTED_NODE); + ASSERT( (inputRank > 0) && "The input tensor cannot be a scalar.", ErrorCode::kUNSUPPORTED_NODE); // Add resize layer nvinfer1::IResizeLayer* layer = ctx->network()->addResize(input); ctx->registerLayer(layer, node.name()); @@ -2562,22 +2565,57 @@ DEFINE_BUILTIN_OP_IMPORTER(Resize) && "This version of TensorRT only supports floor nearest_mode!", ErrorCode::kUNSUPPORTED_NODE); - // Note both asymmetric and align_corners resize modes go through the same import path in TRT: - if (transformationMode == "asymmetric" || transformationMode == "align_corners") - { - layer->setAlignCorners(true); - } - // The existence of a fourth input means a shape was passed as the resize parameter + // For ONNX resize with the "sizes", TensorRT's resize maps to ONNX's in the following ways: + // Nearest: + // alignCorners = 0: ASYMMETRIC + // alignCorners = 1: ALIGN_CORNERS + // Linear: + // alignCorners = 0: HALF_PIXEL + // alignCorners = 1: ALIGN_CORNERS if (inputs.size() == 4) { + if (transformationMode == "align_corners") + { + layer->setAlignCorners(true); + } + if (mode == "nearest") + { + ASSERT((transformationMode == "asymmetric" || transformationMode == "align_corners") && "TensorRT only supports asymmetric and align_corners transformation modes for nearest neighbor resizes when sizes are provided!", ErrorCode::kUNSUPPORTED_NODE); + } + else if (mode == "linear") + { + ASSERT((transformationMode == "half_pixel" || 
transformationMode == "pytorch_half_pixel" || transformationMode == "align_corners") && "TensorRT only supports half_pixel, pytorch_half_pixel, and align_corners transofmration modes for linear resizes when sizes are provided!", ErrorCode::kUNSUPPORTED_NODE); + } auto* resizeShape = &convertToTensor(inputs.at(3), ctx); layer->setInput(1, *resizeShape); layer->setResizeMode(resizeMode); RETURN_FIRST_OUTPUT(layer); } + // For ONNX resize with "scales", TensorRT's resize maps to ONNX's in the following ways: + // Nearest: + // alignCorners = 0: ASYMMETRIC + // alignCorners = 1: ASYMMETRIC + // Linear: + // alignCorners = 0: HALF_PIXEL + // alignCorners = 1: ASYMMETRIC + else + { + if (mode == "nearest") + { + ASSERT(transformationMode == "asymmetric" && "TensorRT only supports asymmetric tranformation mode for nearest neighbor resizes when scales are provided!",ErrorCode::kUNSUPPORTED_NODE); + } + else if (mode == "linear") + { + ASSERT((transformationMode == "asymmetric" || transformationMode == "pytorch_half_pixel" || transformationMode == "half_pixel") && "TensorRT only supports half pixel, pytorch half_pixel, and asymmetric tranformation mode for linear resizes when scales are provided!", ErrorCode::kUNSUPPORTED_NODE); + if (transformationMode == "asymmetric") + { + layer->setAlignCorners(true); + } + } + } } - // For opset 10 resize, the only supported mode is asymmetric resize, which is mapped to TRT's alignCorners. + // For opset 10 resize, the only supported mode is asymmetric resize with scales. else { transformationMode = "asymmetric"; diff --git a/onnx2trt_utils.hpp b/onnx2trt_utils.hpp index 95bfb17f..d01bd77a 100644 --- a/onnx2trt_utils.hpp +++ b/onnx2trt_utils.hpp @@ -39,7 +39,7 @@ do \ { \ std::stringstream ss{}; \ - ss << __FILENAME__ << ":" << __LINE__ << ": " << msg; \ + ss << "[TRT]" << __FILENAME__ << ":" << __LINE__ << ": " << msg; \ ctx->logger().log(severity, ss.str().c_str()); \ } while (0)