diff --git a/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp b/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
index ed04622af6..4a110ae07c 100644
--- a/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -24,7 +25,7 @@ void bbox_overlaps_npu(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
     bboxesFP32 = bboxesFP32.to(at::kFloat);
     gtboxesFP32 = gtboxesFP32.to(at::kFloat);
   }
-  c10::SmallVector<int64_t, N> iousSize = {gtboxesFP32.size(0),
+  c10::SmallVector<int64_t, SIZE> iousSize = {gtboxesFP32.size(0),
                                               bboxesFP32.size(0)};
   if (aligned) {
     iousSize = {gtboxesFP32.size(0), 1};
diff --git a/mmcv/ops/csrc/pytorch/npu/common_util.h b/mmcv/ops/csrc/pytorch/npu/common_util.h
index db05e6a05c..2092763860 100644
--- a/mmcv/ops/csrc/pytorch/npu/common_util.h
+++ b/mmcv/ops/csrc/pytorch/npu/common_util.h
@@ -2,11 +2,4 @@
 #define MMCV_OPS_CSRC_COMMON__UTIL_HPP_
 const int SIZE = 8;
 
-c10::SmallVector<int64_t, SIZE> array_to_vector(c10::IntArrayRef shape) {
-  c10::SmallVector<int64_t, SIZE> shape_small_vec;
-  for (uint64_t i = 0; i < shape.size(); i++) {
-    shape_small_vec.emplace_back(shape[i]);
-  }
-}
-
 #endif  // MMCV_OPS_CSRC_COMMON__UTIL_HPP_
diff --git a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
index 2e1270e450..f9214b4b14 100644
--- a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -15,7 +16,7 @@ Tensor fused_bias_leakyrelu_npu(const Tensor &input, const Tensor &bias,
   if (grad == 0) {
     auto input_size = input.sizes();
     int input_length = input_size.size();
-    c10::SmallVector<int64_t, N> input_size_tmp;
+    c10::SmallVector<int64_t, SIZE> input_size_tmp;
     for (uint64_t i = 0; i < input_size.size(); i++) {
       input_size_tmp.emplace_back(input_size[i]);
     }
diff --git a/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp b/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
index 5638c8be38..f62054ea6c 100644
--- a/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -14,7 +15,7 @@ Tensor nms_npu(Tensor boxes, Tensor scores, float iou_threshold, int offset) {
 
   at::Tensor max_outputsize_y =
       at::empty({}, boxes.options().dtype(at::kInt)).fill_(boxes.size(0));
-  c10::SmallVector<int64_t, N> outputsize = {boxes.size(0)};
+  c10::SmallVector<int64_t, SIZE> outputsize = {boxes.size(0)};
   at::Tensor output =
       at::empty(outputsize, boxes.options().dtype(at::kInt)).fill_(-1);
   OpCommand cmd;
diff --git a/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp b/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
index 717f954cff..c768671cb8 100644
--- a/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -11,7 +12,7 @@ Tensor nms_rotated_npu(const Tensor dets, const Tensor scores,
     detsCast = detsCast.to(at::kFloat);
     scoresCast = scoresCast.to(at::kFloat);
   }
-  c10::SmallVector<int64_t, N> selectedIndexSize = {dets.size(0)};
+  c10::SmallVector<int64_t, SIZE> selectedIndexSize = {dets.size(0)};
   at::Tensor selectedBox = at::empty_like(dets);
 
   at::Tensor selectedIndex =
diff --git a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
index 0e673614fa..2255d302e3 100644
--- a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -42,7 +43,7 @@ void roi_align_backward_npu(Tensor grad_output, Tensor rois, Tensor argmax_y,
     roi_end_mode = 0;
   }
   auto shape = grad_input.sizes();
-  c10::SmallVector<int64_t, N> xdiff_shape;
+  c10::SmallVector<int64_t, SIZE> xdiff_shape;
   for (uint64_t i = 0; i < shape.size(); i++) {
     xdiff_shape.emplace_back(shape[i]);
   }
diff --git a/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp b/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
index 2a3ff09e98..d28d50fd40 100644
--- a/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -32,7 +33,7 @@ void roi_align_rotated_backward_npu(Tensor top_grad, Tensor rois,
   int64_t aligned_height_64 = aligned_height;
   int64_t aligned_width_64 = aligned_width;
   int64_t sampling_ratio_64 = sampling_ratio;
-  c10::SmallVector<int64_t, N> y_grad_shape;
+  c10::SmallVector<int64_t, SIZE> y_grad_shape;
   auto shape = bottom_grad.sizes();
   for (uint64_t i = 0; i < shape.size(); i++) {
     y_grad_shape.emplace_back(shape[i]);
diff --git a/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp b/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
index 48d5b4789e..2fc645c62b 100644
--- a/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
@@ -1,3 +1,4 @@
+#include "common_util.h"
 #include "pytorch_npu_helper.hpp"
 
 using namespace NPU_NAME_SPACE;
@@ -12,12 +13,12 @@ void roipoint_pool3d_forward_impl_npu(int batch_size, int pts_num,
                                       Tensor pooled_empty_flag) {
   auto points_trans = xyz.transpose(1, 2).contiguous();
   auto point_features_trans = pts_feature.transpose(1, 2).contiguous();
-  c10::SmallVector<int64_t, N> features_trans_size = {
+  c10::SmallVector<int64_t, SIZE> features_trans_size = {
       xyz.size(0), boxes3d.size(1), xyz.size(2) + pts_feature.size(2),
       sampled_pts_num};
   at::Tensor pooled_features_trans =
       at::empty(features_trans_size, xyz.options());
-  c10::SmallVector<int64_t, N> empty_flag_size = {boxes3d.size(0),
+  c10::SmallVector<int64_t, SIZE> empty_flag_size = {boxes3d.size(0),
                                                      boxes3d.size(1)};
   EXEC_NPU_CMD(aclnnRoipointPool3dForward, points_trans, point_features_trans,
                boxes3d, sampled_pts_num, pooled_features_trans,