diff --git a/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp b/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
index 3e346e7337..4a110ae07c 100644
--- a/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/bbox_overlaps_npu.cpp
@@ -25,7 +25,7 @@ void bbox_overlaps_npu(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
     bboxesFP32 = bboxesFP32.to(at::kFloat);
     gtboxesFP32 = gtboxesFP32.to(at::kFloat);
   }
-  c10::SmallVector<int64_t, 8> iousSize = {gtboxesFP32.size(0),
+  c10::SmallVector<int64_t, SIZE> iousSize = {gtboxesFP32.size(0),
                                            bboxesFP32.size(0)};
   if (aligned) {
     iousSize = {gtboxesFP32.size(0), 1};
diff --git a/mmcv/ops/csrc/pytorch/npu/common_util.h b/mmcv/ops/csrc/pytorch/npu/common_util.h
index 2092763860..8401c3e680 100644
--- a/mmcv/ops/csrc/pytorch/npu/common_util.h
+++ b/mmcv/ops/csrc/pytorch/npu/common_util.h
@@ -1,5 +1,6 @@
 #ifndef MMCV_OPS_CSRC_COMMON__UTIL_HPP_
 #define MMCV_OPS_CSRC_COMMON__UTIL_HPP_
+const int SIZE = 8;
 
 #endif  // MMCV_OPS_CSRC_COMMON__UTIL_HPP_
diff --git a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
index e8cd552ef7..f9214b4b14 100644
--- a/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/fused_bias_leakyrelu_npu.cpp
@@ -16,7 +16,7 @@ Tensor fused_bias_leakyrelu_npu(const Tensor &input, const Tensor &bias,
   if (grad == 0) {
     auto input_size = input.sizes();
     int input_length = input_size.size();
-    c10::SmallVector<int64_t, 8> input_size_tmp;
+    c10::SmallVector<int64_t, SIZE> input_size_tmp;
     for (uint64_t i = 0; i < input_size.size(); i++) {
       input_size_tmp.emplace_back(input_size[i]);
     }
diff --git a/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp b/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
index a7fbebee93..f62054ea6c 100644
--- a/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/nms_npu.cpp
@@ -15,7 +15,7 @@ Tensor nms_npu(Tensor boxes, Tensor scores, float iou_threshold, int offset) {
   at::Tensor max_outputsize_y = at::empty({},
       boxes.options().dtype(at::kInt)).fill_(boxes.size(0));
-  c10::SmallVector<int64_t, 8> outputsize = {boxes.size(0)};
+  c10::SmallVector<int64_t, SIZE> outputsize = {boxes.size(0)};
   at::Tensor output =
       at::empty(outputsize, boxes.options().dtype(at::kInt)).fill_(-1);
   OpCommand cmd;
diff --git a/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp b/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
index 1b2107eb99..c768671cb8 100644
--- a/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/nms_rotated_npu.cpp
@@ -12,7 +12,7 @@ Tensor nms_rotated_npu(const Tensor dets, const Tensor scores,
     detsCast = detsCast.to(at::kFloat);
     scoresCast = scoresCast.to(at::kFloat);
   }
-  c10::SmallVector<int64_t, 8> selectedIndexSize = {dets.size(0)};
+  c10::SmallVector<int64_t, SIZE> selectedIndexSize = {dets.size(0)};
   at::Tensor selectedBox = at::empty_like(dets);
   at::Tensor selectedIndex =
diff --git a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
index b1edd88865..2255d302e3 100644
--- a/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roi_align_npu.cpp
@@ -43,7 +43,7 @@ void roi_align_backward_npu(Tensor grad_output, Tensor rois, Tensor argmax_y,
     roi_end_mode = 0;
   }
   auto shape = grad_input.sizes();
-  c10::SmallVector<int64_t, 8> xdiff_shape;
+  c10::SmallVector<int64_t, SIZE> xdiff_shape;
   for (uint64_t i = 0; i < shape.size(); i++) {
     xdiff_shape.emplace_back(shape[i]);
   }
diff --git a/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp b/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
index 3d7560f49c..d28d50fd40 100644
--- a/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roi_align_rotated_npu.cpp
@@ -33,7 +33,7 @@ void roi_align_rotated_backward_npu(Tensor top_grad, Tensor rois,
   int64_t aligned_height_64 = aligned_height;
   int64_t aligned_width_64 = aligned_width;
   int64_t sampling_ratio_64 = sampling_ratio;
-  c10::SmallVector<int64_t, 8> y_grad_shape;
+  c10::SmallVector<int64_t, SIZE> y_grad_shape;
   auto shape = bottom_grad.sizes();
   for (uint64_t i = 0; i < shape.size(); i++) {
     y_grad_shape.emplace_back(shape[i]);
diff --git a/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp b/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
index 1cf6500eeb..2fc645c62b 100644
--- a/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
+++ b/mmcv/ops/csrc/pytorch/npu/roipoint_pool3d_forward.cpp
@@ -13,12 +13,12 @@ void roipoint_pool3d_forward_impl_npu(int batch_size, int pts_num,
                                       Tensor pooled_empty_flag) {
   auto points_trans = xyz.transpose(1, 2).contiguous();
   auto point_features_trans = pts_feature.transpose(1, 2).contiguous();
-  c10::SmallVector<int64_t, 8> features_trans_size = {
+  c10::SmallVector<int64_t, SIZE> features_trans_size = {
       xyz.size(0), boxes3d.size(1), xyz.size(2) + pts_feature.size(2),
       sampled_pts_num};
   at::Tensor pooled_features_trans =
       at::empty(features_trans_size, xyz.options());
-  c10::SmallVector<int64_t, 8> empty_flag_size = {boxes3d.size(0),
+  c10::SmallVector<int64_t, SIZE> empty_flag_size = {boxes3d.size(0),
                                                   boxes3d.size(1)};
   EXEC_NPU_CMD(aclnnRoipointPool3dForward, points_trans, point_features_trans,
                boxes3d, sampled_pts_num, pooled_features_trans,