diff --git a/docker/perception/tracking/tracking.Dockerfile b/docker/perception/tracking/tracking.Dockerfile index 368c9ac6..c9a3669e 100644 --- a/docker/perception/tracking/tracking.Dockerfile +++ b/docker/perception/tracking/tracking.Dockerfile @@ -21,8 +21,12 @@ FROM ${BASE_IMAGE} as dependencies # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-get update RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) +# install opencv +RUN apt-get install -y libopencv-dev + # Copy in source code from source stage WORKDIR ${AMENT_WS} COPY --from=source ${AMENT_WS}/src src diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index f095ff37..02729523 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -29,7 +29,7 @@ services: - driver: nvidia count: 1 capabilities: [ gpu ] - command: /bin/bash -c "ros2 launch camera_object_detection eve.launch.py" + command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" volumes: - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt @@ -86,7 +86,7 @@ services: - "${PERCEPTION_TRACKING_IMAGE}:build_main" target: deploy image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch tracking tracking.launch.py" + command: /bin/bash -c "ros2 launch dets_2d_3d dets_2d_3d.launch.py" depth_estimation: build: diff --git a/src/perception/tracking/CMakeLists.txt b/src/perception/tracking/CMakeLists.txt deleted file mode 100644 index ffb74e71..00000000 --- a/src/perception/tracking/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -cmake_minimum_required(VERSION 3.8) -project(tracking) - -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() - -# find 
dependencies -find_package(ament_cmake REQUIRED) -# uncomment the following section in order to fill in -# further dependencies manually. -# find_package( REQUIRED) - -ament_package() diff --git a/src/perception/tracking/dets_2d_3d/CMakeLists.txt b/src/perception/tracking/dets_2d_3d/CMakeLists.txt new file mode 100644 index 00000000..1e7b3c78 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/CMakeLists.txt @@ -0,0 +1,70 @@ +cmake_minimum_required(VERSION 3.10) +project(dets_2d_3d) + +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(sample_msgs REQUIRED) # Custom package containing ROS2 messages +find_package(sensor_msgs REQUIRED) +find_package(vision_msgs REQUIRED) +find_package(pcl_conversions REQUIRED) +find_package(pcl_ros REQUIRED) +find_package(tf2 REQUIRED) +find_package(tf2_ros REQUIRED) +find_package(tf2_geometry_msgs REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(visualization_msgs REQUIRED) +find_package(std_msgs REQUIRED) + +add_library(projection_utils + src/projection_utils.cpp src/cluster.cpp) # add more non-ros node files here + +target_include_directories(projection_utils + PUBLIC include) + +ament_target_dependencies(projection_utils + rclcpp + sample_msgs + sensor_msgs + vision_msgs + visualization_msgs + pcl_conversions + pcl_ros + tf2 + tf2_ros + tf2_geometry_msgs + std_msgs + geometry_msgs) + +add_executable(dets_2d_3d_node src/dets_2d_3d_node.cpp) +target_include_directories(dets_2d_3d_node + PUBLIC include) +include(cmake/PCL.cmake) + +cmake_minimum_required(VERSION 2.8) +find_package( OpenCV REQUIRED ) +include_directories( ${OpenCV_INCLUDE_DIRS} ) +target_link_libraries (dets_2d_3d_node ${OpenCV_LIBS}) + +ament_target_dependencies(dets_2d_3d_node) +target_link_libraries(dets_2d_3d_node 
projection_utils) + +# Copy executable to installation location +install(TARGETS + dets_2d_3d_node + DESTINATION lib/${PROJECT_NAME}) + +# Copy launch files to installation location +install(DIRECTORY + config + launch + DESTINATION share/${PROJECT_NAME}) + +ament_package() diff --git a/src/perception/tracking/dets_2d_3d/cmake/PCL.cmake b/src/perception/tracking/dets_2d_3d/cmake/PCL.cmake new file mode 100644 index 00000000..330f3189 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/cmake/PCL.cmake @@ -0,0 +1,8 @@ +find_package(PCL 1.10 REQUIRED) +list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4") + +include_directories(${PCL_INCLUDE_DIRS}) +list(APPEND ALL_TARGET_LIBRARIES ${PCL_LIBRARIES}) + +link_directories(${PCL_LIBRARY_DIRS}) +add_definitions(${PCL_DEFINITIONS}) diff --git a/src/perception/tracking/dets_2d_3d/config/nuscenes_config.yaml b/src/perception/tracking/dets_2d_3d/config/nuscenes_config.yaml new file mode 100644 index 00000000..c5d1131f --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/config/nuscenes_config.yaml @@ -0,0 +1,33 @@ +dets_2d_3d_node: + ros__parameters: + camera_info_topic: /CAM_FRONT/camera_info + lidar_topic: /LIDAR_TOP + detections_topic: /detections + + publish_detections_topic: /detections_3d + publish_markers_topic: /markers_3d + publish_clusters_topic: /clustered_pc + + camera_frame: CAM_FRONT + lidar_frame: LIDAR_TOP + + clustering_params: + default: + clustering_distances: [5., 30., 45., 60.] + clustering_thresholds: [0.5, 1.1, 1.6, 2.1, 2.6] + cluster_size_min: 5. + cluster_size_max: 100000. + cluster_merge_threshold: 1.5 + person: + clustering_distances: [5., 30., 45., 60.] + clustering_thresholds: [3., 3.5, 4.5, 5.0, 5.6] + cluster_size_min: 2. + cluster_size_max: 100. + cluster_merge_threshold: 1.5 + traffic_light: + clustering_distances: [5., 30., 45., 60.] + clustering_thresholds: [3., 3.5, 4.5, 5.0, 5.6] + cluster_size_min: 2. + cluster_size_max: 100. 
+ cluster_merge_threshold: 1.5 + \ No newline at end of file diff --git a/src/perception/tracking/dets_2d_3d/include/cluster.hpp b/src/perception/tracking/dets_2d_3d/include/cluster.hpp new file mode 100644 index 00000000..09d620b4 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/include/cluster.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +#include + +#include + +class Cluster { + public: + // makes a cloud from all points `in_cloud_ptr` that are indexed by + // `in_cluster_indices` + Cluster(const pcl::PointCloud::Ptr &in_cloud_ptr, + const std::vector &in_cluster_indices); + + pcl::PointXYZ getCentroid() { return centroid_; } + pcl::PointCloud::Ptr getCloud() { return cloud_; } + vision_msgs::msg::BoundingBox3D getBoundingBox(); + + bool isValid(); + int size() { return cloud_->size(); } + + private: + pcl::PointCloud::Ptr cloud_; + pcl::PointXYZ centroid_; + + pcl::PointXYZ max_point_; + pcl::PointXYZ min_point_; + + static int color_index; + static std::vector> colors; +}; diff --git a/src/perception/tracking/dets_2d_3d/include/dets_2d_3d_node.hpp b/src/perception/tracking/dets_2d_3d/include/dets_2d_3d_node.hpp new file mode 100644 index 00000000..289e5184 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/include/dets_2d_3d_node.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "cluster.hpp" +#include "projection_utils.hpp" + +#include + +struct ClusteringParams; + +class TrackingNode : public rclcpp::Node { + public: + TrackingNode(); + + private: + std::map clusteringParams; + + // lidar and camera frame names + std::string lidarFrame_; + std::string cameraFrame_; + + // synchronization utils + std::mutex lidarCloud_mutex_; + + // camera information cached + bool transformInited_; + geometry_msgs::msg::TransformStamped transform_; + sensor_msgs::msg::CameraInfo::SharedPtr camInfo_; + pcl::PointCloud::Ptr 
lidarCloud_; + + // subscribers to camera intrinsics, lidar pts, camera feed, 2d detection + // boxes + rclcpp::Subscription::SharedPtr camInfo_subscriber_; + rclcpp::Subscription::SharedPtr lidar_subscriber_; + rclcpp::Subscription::SharedPtr det_subscriber_; + + // publish the 3d detections + rclcpp::Publisher::SharedPtr det3d_publisher_; + rclcpp::Publisher::SharedPtr marker_publisher_; + rclcpp::Publisher::SharedPtr pc_publisher_; + rclcpp::Publisher::SharedPtr pc_publisher2_; + + std::unique_ptr tf_buffer_; + std::shared_ptr tf_listener_; + + void readCameraInfo(const sensor_msgs::msg::CameraInfo::SharedPtr msg); + void receiveLidar(const sensor_msgs::msg::PointCloud2::SharedPtr msg); + void receiveDetections(const vision_msgs::msg::Detection2DArray::SharedPtr msg); + + int highestIOUScoredBBox(const std::vector bboxes, + const vision_msgs::msg::BoundingBox2D &detBBox, + const std::vector> &clusters); + double overlapBoundingBox(const vision_msgs::msg::BoundingBox2D &bboxA, + const vision_msgs::msg::BoundingBox2D &bboxB); + double iouScore(const vision_msgs::msg::BoundingBox2D &bboxA, + const vision_msgs::msg::BoundingBox2D &bboxB); + + std::map initializeClusteringParams( + const std::map &clustering_params_map); + + template + T getDefaultOrValue(std::map m, std::string key) { + if (auto it = m.find(key); it != m.end()) return it->second; + return m["default"]; + } +}; diff --git a/src/perception/tracking/dets_2d_3d/include/projection_utils.hpp b/src/perception/tracking/dets_2d_3d/include/projection_utils.hpp new file mode 100644 index 00000000..6c973312 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/include/projection_utils.hpp @@ -0,0 +1,68 @@ +#pragma once + +#include "cluster.hpp" + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ClusteringParams { + // segment by distance + std::vector clustering_distances; + // Map of the nearest neighbor distance threshold for each segment, for each + // detection + 
std::vector clustering_thresholds; + + double cluster_size_min; + double cluster_size_max; + double cluster_merge_threshold; +}; + +// main helper functions used by the node -- should all be static +class ProjectionUtils { + public: + static void pointsInBbox(const pcl::PointCloud::Ptr &inlierCloud, + const pcl::PointCloud::Ptr &lidarCloud, + const std::vector &projs2d, + const vision_msgs::msg::BoundingBox2D &bbox); + + // P : projection matrix, pt : 3D lidar pt, transform: transform from lidar to + // camera frame + static std::optional projectLidarToCamera( + const geometry_msgs::msg::TransformStamped &transform, const std::array &P, + const pcl::PointXYZ &pt); + + static bool isPointInBbox(const geometry_msgs::msg::Point &pt, + const vision_msgs::msg::BoundingBox2D &bbox); + + static void removeFloor(const pcl::PointCloud::Ptr &lidarCloud, + const pcl::PointCloud::Ptr &cloud_filtered); + + static std::pair>, + std::vector> + getClusteredBBoxes(const pcl::PointCloud::Ptr &lidarCloud, + const ClusteringParams &clusteringParams); + + private: + static std::vector> clusterAndColor( + const pcl::PointCloud::Ptr &in_cloud_ptr, double in_max_cluster_distance, + double cluster_size_min, double cluster_size_max); + + static void checkClusterMerge(size_t in_cluster_id, + const std::vector> &in_clusters, + std::vector &in_out_visited_clusters, + std::vector &out_merge_indices, double in_merge_threshold); + + static std::shared_ptr mergeClusters( + const std::vector> &in_clusters, + const std::vector &in_merge_indices, std::vector &in_out_merged_clusters); + + static std::vector> checkAllForMerge( + const std::vector> &in_clusters, float in_merge_threshold); +}; diff --git a/src/perception/tracking/dets_2d_3d/launch/dets_2d_3d.launch.py b/src/perception/tracking/dets_2d_3d/launch/dets_2d_3d.launch.py new file mode 100644 index 00000000..ccf46579 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/launch/dets_2d_3d.launch.py @@ -0,0 +1,27 @@ +from launch import 
LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + ld = LaunchDescription() + config = os.path.join( + get_package_share_directory('dets_2d_3d'), + 'config', + 'nuscenes_config.yaml' + ) + + # nodes + detections_2d_3d_node = Node( + package='dets_2d_3d', + executable='dets_2d_3d_node', + name='dets_2d_3d_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + # finalize + ld.add_action(detections_2d_3d_node) + + return ld diff --git a/src/perception/tracking/dets_2d_3d/package.xml b/src/perception/tracking/dets_2d_3d/package.xml new file mode 100644 index 00000000..dbb6686d --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/package.xml @@ -0,0 +1,31 @@ + + + + dets_2d_3d + 0.0.0 + TODO: Package description + bolty + TODO: License declaration + + ament_cmake + + ament_lint_auto + ament_lint_common + + rclcpp + sample_msgs + sensor_msgs + vision_msgs + pcl_conversions + pcl_ros + tf2 + tf2_ros + tf2_geometry_msgs + geometry_msgs + visualization_msgs + std_msgs + + + ament_cmake + + diff --git a/src/perception/tracking/dets_2d_3d/src/cluster.cpp b/src/perception/tracking/dets_2d_3d/src/cluster.cpp new file mode 100644 index 00000000..3b81b8f0 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/src/cluster.cpp @@ -0,0 +1,113 @@ +#include "cluster.hpp" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +typedef pcl::PointCloud ClusterCloud; +typedef vision_msgs::msg::BoundingBox3D BBox3D; + +int Cluster::color_index = 0; +std::vector> Cluster::colors = {{0, 0, 255}, {0, 255, 0}, {255, 0, 0}, + {255, 255, 0}, {0, 255, 255}, {255, 0, 255}}; + +Cluster::Cluster(const pcl::PointCloud::Ptr &in_cloud_ptr, + const std::vector &in_cluster_indices) + : cloud_{new ClusterCloud()} { + bool indexesGiven = in_cluster_indices.size() > 0; + + size_t 
max_index = (indexesGiven) ? in_cluster_indices.size() : in_cloud_ptr->points.size(); + + min_point_.x = std::numeric_limits::max(); + min_point_.y = std::numeric_limits::max(); + min_point_.z = std::numeric_limits::max(); + + max_point_.x = -std::numeric_limits::max(); + max_point_.y = -std::numeric_limits::max(); + max_point_.z = -std::numeric_limits::max(); + + for (size_t i = 0; i < max_index; ++i) { + size_t idx = (indexesGiven) ? in_cluster_indices[i] : i; + + pcl::PointXYZRGB pt; + pt.x = in_cloud_ptr->points[idx].x; + pt.y = in_cloud_ptr->points[idx].y; + pt.z = in_cloud_ptr->points[idx].z; + pt.r = colors[color_index % colors.size()][0]; + pt.g = colors[color_index % colors.size()][1]; + pt.b = colors[color_index % colors.size()][2]; + + if (pt.x < min_point_.x) min_point_.x = pt.x; + if (pt.y < min_point_.y) min_point_.y = pt.y; + if (pt.z < min_point_.z) min_point_.z = pt.z; + + if (pt.x > max_point_.x) max_point_.x = pt.x; + if (pt.y > max_point_.y) max_point_.y = pt.y; + if (pt.z > max_point_.z) max_point_.z = pt.z; + + cloud_->emplace_back(pt); + } + + ++color_index; +} + +BBox3D Cluster::getBoundingBox() { + BBox3D bounding_box_; + + if (cloud_->size() == 0) return bounding_box_; + + double length_ = max_point_.x - min_point_.x; + double width_ = max_point_.y - min_point_.y; + double height_ = max_point_.z - min_point_.z; + + bounding_box_.center.position.x = min_point_.x + length_ / 2; + bounding_box_.center.position.y = min_point_.y + width_ / 2; + bounding_box_.center.position.z = min_point_.z + height_ / 2; + + bounding_box_.size.x = ((length_ < 0) ? -1 * length_ : length_); + bounding_box_.size.y = ((width_ < 0) ? -1 * width_ : width_); + bounding_box_.size.z = ((height_ < 0) ? 
-1 * height_ : height_); + + // pose estimation + double rz = 0; + + { + std::vector points; + for (unsigned int i = 0; i < cloud_->points.size(); i++) { + cv::Point2f pt; + pt.x = cloud_->points[i].x; + pt.y = cloud_->points[i].y; + points.push_back(pt); + } + + cv::RotatedRect box = cv::minAreaRect(points); + rz = box.angle * 3.14 / 180; + bounding_box_.center.position.x = box.center.x; + bounding_box_.center.position.y = box.center.y; + bounding_box_.size.x = box.size.width; + bounding_box_.size.y = box.size.height; + } + + // set bounding box direction + tf2::Quaternion quat; + quat.setRPY(0.0, 0.0, rz); + + geometry_msgs::msg::Quaternion msg_quat = tf2::toMsg(quat); + bounding_box_.center.orientation = msg_quat; + + std::cout << cv::getBuildInformation() << std::endl; + + return bounding_box_; +} + +bool Cluster::isValid() { return cloud_->size() > 0; } diff --git a/src/perception/tracking/dets_2d_3d/src/dets_2d_3d_node.cpp b/src/perception/tracking/dets_2d_3d/src/dets_2d_3d_node.cpp new file mode 100644 index 00000000..9642dcc3 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/src/dets_2d_3d_node.cpp @@ -0,0 +1,272 @@ +#include "dets_2d_3d_node.hpp" + +#include +#include +#include + +#include +#include + +#include +#include + +TrackingNode::TrackingNode() + : Node("dets_2d_3d", rclcpp::NodeOptions() + .allow_undeclared_parameters(true) + .automatically_declare_parameters_from_overrides(true)), + transformInited_{false}, + lidarCloud_{new pcl::PointCloud()} { + rclcpp::NodeOptions() + .allow_undeclared_parameters(true) + .automatically_declare_parameters_from_overrides(true); + + lidarFrame_ = this->get_parameter("lidar_frame").as_string(); + cameraFrame_ = this->get_parameter("camera_frame").as_string(); + + // setup pub subs + camInfo_subscriber_ = this->create_subscription( + this->get_parameter("camera_info_topic").as_string(), 10, + std::bind(&TrackingNode::readCameraInfo, this, std::placeholders::_1)); + + lidar_subscriber_ = 
this->create_subscription( + this->get_parameter("lidar_topic").as_string(), 10, + std::bind(&TrackingNode::receiveLidar, this, std::placeholders::_1)); + + det_subscriber_ = this->create_subscription( + this->get_parameter("detections_topic").as_string(), 10, + std::bind(&TrackingNode::receiveDetections, this, std::placeholders::_1)); + + det3d_publisher_ = this->create_publisher( + this->get_parameter("publish_detections_topic").as_string(), 10); + marker_publisher_ = this->create_publisher( + this->get_parameter("publish_markers_topic").as_string(), 10); + pc_publisher_ = this->create_publisher( + this->get_parameter("publish_clusters_topic").as_string(), 10); + + tf_buffer_ = std::make_unique(this->get_clock()); + tf_listener_ = std::make_shared(*tf_buffer_); + + std::map clustering_params_map; + this->get_parameters("clustering_params", clustering_params_map); + clusteringParams = initializeClusteringParams(clustering_params_map); +} + +std::map TrackingNode::initializeClusteringParams( + const std::map &clustering_params_map) { + std::map clusteringParams; + for (auto iter = clustering_params_map.begin(); iter != clustering_params_map.end(); ++iter) { + std::string key = iter->first; + size_t split_loc = key.find_first_of('.'); + std::string det_type = key.substr(0, split_loc); + std::string field_type = key.substr(split_loc + 1); + + ClusteringParams params; + if (auto it = clusteringParams.find(det_type); it != clusteringParams.end()) { + params = it->second; + } + + if (field_type == "clustering_distances") + params.clustering_distances = iter->second.as_double_array(); + else if (field_type == "clustering_thresholds") + params.clustering_thresholds = iter->second.as_double_array(); + else if (field_type == "cluster_size_min") + params.cluster_size_min = iter->second.as_double(); + else if (field_type == "cluster_size_max") + params.cluster_size_max = iter->second.as_double(); + else if (field_type == "cluster_merge_threshold") + 
params.cluster_merge_threshold = iter->second.as_double(); + + clusteringParams[det_type] = params; + } + return clusteringParams; +} + +void TrackingNode::receiveLidar(const sensor_msgs::msg::PointCloud2::SharedPtr pointCloud_msg) { + // save latest lidar info + std::lock_guard guard_lidar(lidarCloud_mutex_); + + sensor_msgs::msg::PointCloud2 pointCloud = *pointCloud_msg; + pcl::fromROSMsg(pointCloud, *lidarCloud_); +} + +void TrackingNode::readCameraInfo(const sensor_msgs::msg::CameraInfo::SharedPtr msg) { + std::cout << msg->distortion_model << std::endl; + camInfo_ = msg; + camInfo_subscriber_.reset(); +} + +void TrackingNode::receiveDetections(const vision_msgs::msg::Detection2DArray::SharedPtr msg) { + if (!transformInited_) { + try { + transform_ = tf_buffer_->lookupTransform(cameraFrame_, lidarFrame_, tf2::TimePointZero); + transformInited_ = true; + } catch (const tf2::TransformException &ex) { + RCLCPP_INFO(this->get_logger(), "Could not transform %s", ex.what()); + return; + } + } + + // remove floor from lidar cloud + pcl::PointCloud::Ptr lidarCloudNoFloor(new pcl::PointCloud()); + { + std::lock_guard guard_lidar(lidarCloud_mutex_); + ProjectionUtils::removeFloor(lidarCloud_, lidarCloudNoFloor); + + if (lidarCloudNoFloor->size() == 0) + return; // do not process more, if all the points are floor points + RCLCPP_INFO(this->get_logger(), "receive detection %ld vs %ld", lidarCloud_->size(), + lidarCloudNoFloor->size()); + } + + // transform all lidar pts to 2d points in the camera + std::vector lidar2dProjs; + pcl::PointCloud::Ptr inCameraPoints(new pcl::PointCloud()); + for (const pcl::PointXYZ &pt : lidarCloudNoFloor->points) { + std::optional proj = + ProjectionUtils::projectLidarToCamera(transform_, camInfo_->p, pt); + if (proj) { + inCameraPoints->emplace_back(pt); + lidar2dProjs.emplace_back(*proj); + } + } + + // publish both rviz markers (for visualization) and a detection array + visualization_msgs::msg::MarkerArray markerArray3d; + 
vision_msgs::msg::Detection3DArray detArray3d; + pcl::PointCloud + mergedClusters; // coloured pc to visualize points in each cluster + + // process each detection in det array + int bboxId = 0; + for (const vision_msgs::msg::Detection2D &det : msg->detections) { + vision_msgs::msg::BoundingBox2D bbox = det.bbox; + + // process lidarcloud with extrinsic trans from lidar to cam frame + pcl::PointCloud::Ptr inlierPoints(new pcl::PointCloud()); + ProjectionUtils::pointsInBbox(inlierPoints, inCameraPoints, lidar2dProjs, bbox); + if (inlierPoints->size() == 0) { + RCLCPP_INFO(this->get_logger(), "no inliers found for detection %s", + det.results[0].hypothesis.class_id.c_str()); + continue; + } + + // clustering + auto params = getDefaultOrValue( + clusteringParams, det.results[0].hypothesis.class_id.c_str()); + auto clusterAndBBoxes = ProjectionUtils::getClusteredBBoxes(inlierPoints, params); + std::vector> clusters = clusterAndBBoxes.first; // needed? for viz purposes only + std::vector allBBoxes = clusterAndBBoxes.second; + + if (clusters.size() == 0 || allBBoxes.size() == 0) continue; + + // score each 3d bbox based on iou with teh original 2d bbox & the density + // of points in the cluster + int bestIndex = highestIOUScoredBBox(allBBoxes, bbox, clusters); + vision_msgs::msg::BoundingBox3D bestBBox = allBBoxes[bestIndex]; + + // for visauzliation : adds detection to cluster pc visualization & the + // marker array & the 3d detection array + mergedClusters += *(clusters[bestIndex]->getCloud()); + + vision_msgs::msg::Detection3D det3d; + det3d.bbox = bestBBox; + det3d.results = det.results; + detArray3d.detections.emplace_back(det3d); + + visualization_msgs::msg::Marker marker; + marker.type = visualization_msgs::msg::Marker::CUBE; + marker.scale = bestBBox.size; + marker.pose = bestBBox.center; + marker.header.frame_id = lidarFrame_; + marker.header.stamp = msg->header.stamp; + marker.id = bboxId; + markerArray3d.markers.push_back(marker); + ++bboxId; + } + + 
det3d_publisher_->publish(detArray3d); + marker_publisher_->publish(markerArray3d); + + sensor_msgs::msg::PointCloud2 pubCloud; + pcl::toROSMsg(mergedClusters, pubCloud); + pubCloud.header.frame_id = lidarFrame_; + pubCloud.header.stamp = msg->header.stamp; + + pc_publisher_->publish(pubCloud); + + RCLCPP_INFO(this->get_logger(), "published %ld 3d detections\n\n", markerArray3d.markers.size()); +} + +int TrackingNode::highestIOUScoredBBox(const std::vector bboxes, + const vision_msgs::msg::BoundingBox2D &detBBox, + const std::vector> &clusters) { + int bestScore = 0; + int bestBBox = 0; + + for (size_t i = 0; i < bboxes.size(); ++i) { + vision_msgs::msg::BoundingBox3D b = bboxes[i]; + // project the 3d corners of the bbox to 2d camera frame + pcl::PointXYZ top_left; + top_left.x = b.center.position.x - b.size.x / 2; + top_left.y = b.center.position.y - b.size.y / 2; + top_left.z = b.center.position.z - b.size.z / 2; + + pcl::PointXYZ bottom_right; + bottom_right.x = b.center.position.x + b.size.x / 2; + bottom_right.y = b.center.position.y + b.size.y / 2; + bottom_right.z = b.center.position.z + b.size.z / 2; + + std::optional top_left2d = + ProjectionUtils::projectLidarToCamera(transform_, camInfo_->p, top_left); + std::optional bottom_right2d = + ProjectionUtils::projectLidarToCamera(transform_, camInfo_->p, bottom_right); + + if (!top_left2d || !bottom_right2d) return 0; + + vision_msgs::msg::BoundingBox2D bbox2d; + bbox2d.center.position.x = ((top_left2d->x) + (bottom_right2d->x)) / 2; + bbox2d.center.position.y = ((top_left2d->y) + (bottom_right2d->y)) / 2; + bbox2d.size_x = abs(top_left2d->x - bottom_right2d->x); + bbox2d.size_y = abs(top_left2d->y - bottom_right2d->y); + + double iou = iouScore(bbox2d, detBBox); + + // score also includes density of points (num points/ volume) in the 3d bbox + double density = clusters[i]->size() / (b.size.x * b.size.y * b.size.z); + double score = iou + density; + + if (score > bestScore) { + bestScore = score; + bestBBox = 
i; + } + } + return bestBBox; +} + +double TrackingNode::overlapBoundingBox(const vision_msgs::msg::BoundingBox2D &boxA, + const vision_msgs::msg::BoundingBox2D &boxB) { + double overlapHeight = + std::min(boxA.center.position.y + boxA.size_y / 2, boxB.center.position.y + boxB.size_y / 2) - + std::max(boxA.center.position.y - boxA.size_y / 2, boxB.center.position.y - boxB.size_y / 2); + if (overlapHeight <= 0) return 0; + + double overlapWidth = + std::min(boxA.center.position.x + boxA.size_x / 2, boxB.center.position.x + boxB.size_x / 2) - + std::max(boxA.center.position.x - boxA.size_x / 2, boxB.center.position.x - boxB.size_x / 2); + if (overlapWidth <= 0) return 0; + + return overlapHeight * overlapWidth; +} + +double TrackingNode::iouScore(const vision_msgs::msg::BoundingBox2D &bboxA, + const vision_msgs::msg::BoundingBox2D &bboxB) { + double overlap = overlapBoundingBox(bboxA, bboxB); + return (overlap / (bboxA.size_x * bboxA.size_y + bboxB.size_x * bboxB.size_y - overlap)); +} + +int main(int argc, char **argv) { + rclcpp::init(argc, argv); + rclcpp::spin(std::make_shared()); + rclcpp::shutdown(); + return 0; +} diff --git a/src/perception/tracking/dets_2d_3d/src/projection_utils.cpp b/src/perception/tracking/dets_2d_3d/src/projection_utils.cpp new file mode 100644 index 00000000..4fc73234 --- /dev/null +++ b/src/perception/tracking/dets_2d_3d/src/projection_utils.cpp @@ -0,0 +1,261 @@ +#include "projection_utils.hpp" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include + +typedef pcl::PointCloud Cloud; +typedef pcl::PointCloud ColorCloud; +typedef std::shared_ptr ClusterPtr; +typedef vision_msgs::msg::BoundingBox3D BBox3D; + +void ProjectionUtils::pointsInBbox(const Cloud::Ptr &inlierCloud, const Cloud::Ptr &lidarCloud, + const std::vector &projs2d, + const vision_msgs::msg::BoundingBox2D &bbox) { + for (size_t i = 0; i < projs2d.size(); ++i) { + // P * [x y z 1]^T, P is row major + if 
(isPointInBbox(projs2d[i], bbox)) inlierCloud->push_back(lidarCloud->points[i]); + } +} + +bool ProjectionUtils::isPointInBbox(const geometry_msgs::msg::Point &pt, + const vision_msgs::msg::BoundingBox2D &bbox) { + double padding = 0; + + if (bbox.center.position.x - bbox.size_x / 2 - padding < pt.x && + pt.x < bbox.center.position.x + bbox.size_x / 2 + padding && + bbox.center.position.y - bbox.size_y / 2 - padding < pt.y && + pt.y < bbox.center.position.y + bbox.size_y / 2 + padding) { + return true; + } + return false; +} + +std::optional ProjectionUtils::projectLidarToCamera( + const geometry_msgs::msg::TransformStamped &transform, const std::array &p, + const pcl::PointXYZ &pt) { + // lidar to camera frame + auto trans_pt = geometry_msgs::msg::Point(); + auto orig_pt = geometry_msgs::msg::Point(); + orig_pt.x = pt.x; + orig_pt.y = pt.y; + orig_pt.z = pt.z; + + tf2::doTransform(orig_pt, trans_pt, transform); + + if (trans_pt.z < 1) return std::nullopt; + + // camera frame to camera 2D projection + double u = p[0] * trans_pt.x + p[1] * trans_pt.y + p[2] * trans_pt.z + p[3]; + double v = p[4] * trans_pt.x + p[5] * trans_pt.y + p[6] * trans_pt.z + p[7]; + double w = p[8] * trans_pt.x + p[9] * trans_pt.y + p[10] * trans_pt.z + p[11]; + + auto proj_pt = geometry_msgs::msg::Point(); + proj_pt.x = u / w; + proj_pt.y = v / w; + + // check if inside camera frame bounds + if (proj_pt.x > 0 && proj_pt.x < 1600 && proj_pt.y > 0 && proj_pt.y < 900) return proj_pt; + return std::nullopt; +} + +// https://pointclouds.org/documentation/tutorials/progressive_morphological_filtering.html +void ProjectionUtils::removeFloor(const Cloud::Ptr &lidarCloud, const Cloud::Ptr &cloud_filtered) { + pcl::PointIndices::Ptr inliers(new pcl::PointIndices); + pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients); + + // Create the filtering object + pcl::SACSegmentation seg; + seg.setOptimizeCoefficients(true); + seg.setModelType(pcl::SACMODEL_PERPENDICULAR_PLANE); + 
seg.setMethodType(pcl::SAC_RANSAC); + seg.setMaxIterations(100); + seg.setAxis(Eigen::Vector3f(0, 0, 1)); + seg.setEpsAngle(0.1); + seg.setDistanceThreshold(0.5); // floor distance + seg.setOptimizeCoefficients(true); + seg.setInputCloud(lidarCloud); + seg.segment(*inliers, *coefficients); + + // Create the filtering object + pcl::ExtractIndices extract; + extract.setInputCloud(lidarCloud); + extract.setIndices(inliers); + + // Extract non-ground returns + extract.setNegative(true); + extract.filter(*cloud_filtered); +} + +std::pair, std::vector> ProjectionUtils::getClusteredBBoxes( + const Cloud::Ptr &lidarCloud, const ClusteringParams &clusteringParams) { + std::vector::Ptr> cloud_segments_array(5); + for (size_t i = 0; i < cloud_segments_array.size(); ++i) { + pcl::PointCloud::Ptr temp(new pcl::PointCloud()); + cloud_segments_array[i] = temp; + } + + // segment points into spherical shells from point cloud origin + for (const pcl::PointXYZ &pt : lidarCloud->points) { + float origin_distance = sqrt(pow(pt.x, 2) + pow(pt.y, 2)); + if (origin_distance < clusteringParams.clustering_distances[0]) + cloud_segments_array[0]->points.push_back(pt); + else if (origin_distance < clusteringParams.clustering_distances[1]) + cloud_segments_array[1]->points.push_back(pt); + else if (origin_distance < clusteringParams.clustering_distances[2]) + cloud_segments_array[2]->points.push_back(pt); + else if (origin_distance < clusteringParams.clustering_distances[3]) + cloud_segments_array[3]->points.push_back(pt); + else + cloud_segments_array[4]->points.push_back(pt); + } + + // get largest cluster in each shell + std::vector all_clusters; + for (unsigned int i = 1; i < cloud_segments_array.size(); i++) { + // add clusters from each shell + std::vector local_clusters = + clusterAndColor(cloud_segments_array[i], clusteringParams.clustering_thresholds[i], + clusteringParams.cluster_size_min, clusteringParams.cluster_size_max); + all_clusters.insert(all_clusters.end(), 
local_clusters.begin(), local_clusters.end()); + } + + // merge clusters if possible, do this twice? + std::vector mid_clusters = + (all_clusters.size() > 0) ? ProjectionUtils::checkAllForMerge( + all_clusters, clusteringParams.cluster_merge_threshold) + : all_clusters; + std::vector final_clusters = + (mid_clusters.size() > 0) ? ProjectionUtils::checkAllForMerge( + mid_clusters, clusteringParams.cluster_merge_threshold) + : mid_clusters; + + // get boundingboxes for each & return all possible 3d bboxes (if valid) + std::vector bboxes; + for (const ClusterPtr &cluster : final_clusters) { + BBox3D b = cluster->getBoundingBox(); + if (cluster->isValid()) bboxes.emplace_back(b); + } + return std::pair, std::vector>{final_clusters, bboxes}; +} + +std::vector ProjectionUtils::clusterAndColor(const Cloud::Ptr &in_cloud_ptr, + double in_max_cluster_distance, + double cluster_size_min, + double cluster_size_max) { + std::vector clusters; + if (in_cloud_ptr->size() == 0) return clusters; + + pcl::search::KdTree::Ptr tree(new pcl::search::KdTree()); + + // create 2d pc, by copying & making it flat + Cloud::Ptr cloud_2d(new pcl::PointCloud()); + pcl::copyPointCloud(*in_cloud_ptr, *cloud_2d); + for (size_t i = 0; i < cloud_2d->points.size(); i++) cloud_2d->points[i].z = 0; + + tree->setInputCloud(cloud_2d); + + std::vector cluster_indices; + + // perform clustering on 2d cloud : + // https://pcl.readthedocs.io/projects/tutorials/en/master/cluster_extraction.html + pcl::EuclideanClusterExtraction ec; + ec.setClusterTolerance(in_max_cluster_distance); + ec.setMinClusterSize(cluster_size_min); + ec.setMaxClusterSize(cluster_size_max); + ec.setSearchMethod(tree); + ec.setInputCloud(cloud_2d); + ec.extract(cluster_indices); + + // add pts at clustered indexes to cluster + for (const auto &cluster : cluster_indices) { + ClusterPtr cloud_cluster = std::make_shared(in_cloud_ptr, cluster.indices); + clusters.emplace_back(cloud_cluster); + } + + return clusters; +} + +void 
ProjectionUtils::checkClusterMerge(size_t in_cluster_id, + const std::vector &in_clusters, + std::vector &in_out_visited_clusters, + std::vector &out_merge_indices, + double in_merge_threshold) { + pcl::PointXYZ point_a = in_clusters[in_cluster_id]->getCentroid(); + + for (size_t i = 0; i < in_clusters.size(); i++) { + if (i != in_cluster_id && !in_out_visited_clusters[i]) { + pcl::PointXYZ point_b = in_clusters[i]->getCentroid(); + double distance = sqrt(pow(point_b.x - point_a.x, 2) + pow(point_b.y - point_a.y, 2)); + + if (distance <= in_merge_threshold) { + in_out_visited_clusters[i] = true; + out_merge_indices.push_back(i); + // look for all other clusters that can be merged with this merge-able + // cluster + checkClusterMerge(i, in_clusters, in_out_visited_clusters, out_merge_indices, + in_merge_threshold); + } + } + } +} + +ClusterPtr ProjectionUtils::mergeClusters(const std::vector &in_clusters, + const std::vector &in_merge_indices, + std::vector &in_out_merged_clusters) { + ColorCloud merged_cloud; + + // for each cluster in merge cloud indices, merge into larger cloud + for (size_t i = 0; i < in_merge_indices.size(); i++) { + merged_cloud += *(in_clusters[in_merge_indices[i]]->getCloud()); + in_out_merged_clusters[in_merge_indices[i]] = true; + } + + Cloud merged_cloud_uncoloured; + pcl::copyPointCloud(merged_cloud, merged_cloud_uncoloured); + + Cloud::Ptr merged_cloud_ptr(new Cloud(merged_cloud_uncoloured)); + + std::vector temp; + ClusterPtr merged_cluster = std::make_shared(merged_cloud_ptr, temp); + return merged_cluster; +} + +std::vector ProjectionUtils::checkAllForMerge( + const std::vector &in_clusters, float in_merge_threshold) { + std::vector out_clusters; + + std::vector visited_clusters(in_clusters.size(), false); + std::vector merged_clusters(in_clusters.size(), false); + + for (size_t i = 0; i < in_clusters.size(); i++) { + if (!visited_clusters[i]) { + visited_clusters[i] = true; + + std::vector merge_indices; + checkClusterMerge(i, 
in_clusters, visited_clusters, merge_indices, in_merge_threshold); + ClusterPtr mergedCluster = mergeClusters(in_clusters, merge_indices, merged_clusters); + + out_clusters.emplace_back(mergedCluster); + } + } + for (size_t i = 0; i < in_clusters.size(); i++) { + // check for clusters not merged, add them to the output + if (!merged_clusters[i]) { + out_clusters.push_back(in_clusters[i]); + } + } + + return out_clusters; +} diff --git a/src/perception/tracking/package.xml b/src/perception/tracking/package.xml deleted file mode 100644 index bff8967c..00000000 --- a/src/perception/tracking/package.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - tracking - 0.0.0 - TODO: Package description - bolty - TODO: License declaration - - ament_cmake - - - - ament_cmake - - diff --git a/src/perception/tracking/requirements.txt b/src/perception/tracking/requirements.txt new file mode 100644 index 00000000..2924f148 --- /dev/null +++ b/src/perception/tracking/requirements.txt @@ -0,0 +1,3 @@ +setuptools==65.5.1 +numpy==1.24.2 +scipy diff --git a/src/perception/tracking/tracking_viz/config/basic_config.yaml b/src/perception/tracking/tracking_viz/config/basic_config.yaml new file mode 100644 index 00000000..8f34133b --- /dev/null +++ b/src/perception/tracking/tracking_viz/config/basic_config.yaml @@ -0,0 +1,8 @@ +tracking_viz_node: + ros__parameters: + image_topic: /annotated_img + publish_viz_topic: /annotated_3d_img + det_3d_topic: /detections_3d + camera_info_topic: /CAM_FRONT/camera_info + camera_frame: CAM_FRONT + lidar_frame: LIDAR_TOP diff --git a/src/perception/tracking/tracking_viz/launch/basic_viz.launch.py b/src/perception/tracking/tracking_viz/launch/basic_viz.launch.py new file mode 100644 index 00000000..a8b2721b --- /dev/null +++ b/src/perception/tracking/tracking_viz/launch/basic_viz.launch.py @@ -0,0 +1,23 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def 
generate_launch_description():
    """Build and return the LaunchDescription for the tracking_viz node.

    Reads ``basic_config.yaml`` from this package's installed share
    directory and passes it to ``tracking_viz_node`` as ROS parameters.
    """
    # NOTE(review): `ld` is assigned but never used -- the function returns a
    # fresh LaunchDescription below. Candidate for removal.
    ld = LaunchDescription()
    # Resolve the installed config file for this package.
    config = os.path.join(
        get_package_share_directory('tracking_viz'),
        'config',
        'basic_config.yaml'
    )

    tracking_viz_node = Node(
        package='tracking_viz',
        executable='tracking_viz_node',
        name='tracking_viz_node',
        parameters=[config],
        # force INFO-level logging for this node
        arguments=['--ros-args', '--log-level', 'info']
    )

    return LaunchDescription([tracking_viz_node])
diff --git a/src/perception/tracking/tracking_viz/package.xml b/src/perception/tracking/tracking_viz/package.xml
new file mode 100644
index 00000000..c585c087
--- /dev/null
+++ b/src/perception/tracking/tracking_viz/package.xml
<!-- NOTE(review): the XML tags below were stripped by text extraction; they
     are reconstructed from the visible values using the standard ROS 2
     package format-3 layout. The maintainer email was lost - restore it. -->
<?xml version="1.0"?>
<package format="3">
  <name>tracking_viz</name>
  <version>0.0.0</version>
  <description>The tracking viz package</description>
  <maintainer email="TODO@example.com">Steven Gong</maintainer>
  <license>TODO: License declaration</license>

  <depend>cv_bridge</depend>
  <depend>sensor_msgs</depend>
  <depend>vision_msgs</depend>
  <depend>std_msgs</depend>

  <test_depend>ament_copyright</test_depend>
  <test_depend>ament_flake8</test_depend>
  <test_depend>ament_pep257</test_depend>
  <test_depend>python3-pytest</test_depend>

  <export>
    <build_type>ament_python</build_type>
  </export>
</package>
diff --git a/src/perception/tracking/tracking_viz/resource/tracking_viz b/src/perception/tracking/tracking_viz/resource/tracking_viz
new file mode 100755
index 00000000..e69de29b
diff --git a/src/perception/tracking/tracking_viz/setup.cfg b/src/perception/tracking/tracking_viz/setup.cfg
new file mode 100644
index 00000000..3cf91db1
--- /dev/null
+++ b/src/perception/tracking/tracking_viz/setup.cfg
[develop]
script_dir=$base/lib/tracking_viz
[install]
install_scripts=$base/lib/tracking_viz
diff --git a/src/perception/tracking/tracking_viz/setup.py b/src/perception/tracking/tracking_viz/setup.py
new file mode 100644
index 00000000..4249a7c1
--- /dev/null
+++ b/src/perception/tracking/tracking_viz/setup.py
from setuptools import setup
import os
from glob import glob

package_name = 'tracking_viz'

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        (os.path.join('share',
package_name, 'launch'), glob('launch/*.launch.py')), + (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='Meshva', + maintainer_email='meshvaD@watonomous.ca', + description='TODO: Package description', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'tracking_viz_node = tracking_viz.draw_tracks:main' + ], + }, +) diff --git a/src/perception/tracking/tracking_viz/tracking_viz/__init__.py b/src/perception/tracking/tracking_viz/tracking_viz/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py b/src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py new file mode 100755 index 00000000..af2ef097 --- /dev/null +++ b/src/perception/tracking/tracking_viz/tracking_viz/draw_tracks.py @@ -0,0 +1,204 @@ +import rclpy +from rclpy.node import Node +import os + +from geometry_msgs.msg import Pose +from sensor_msgs.msg import Image, CameraInfo +from vision_msgs.msg import Detection3DArray + +from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy + +import tf2_geometry_msgs +from tf2_ros import TransformException +from tf2_ros.buffer import Buffer +from tf2_ros.transform_listener import TransformListener + +import cv2 +from cv_bridge import CvBridge, CvBridgeError +import numpy as np + +import time +from collections import deque +from multiprocessing import Lock +from scipy.spatial.transform import Rotation +from random import randint + +mutex = Lock() + +class DrawBasicDetections(Node): + def __init__(self): + super().__init__("tracking_viz_node") + self.get_logger().info("Creating tracking viz node...") + + self.declare_parameter("image_topic", "/image") + self.declare_parameter("publish_viz_topic", "/annotated_3d_det_img") + self.declare_parameter("det_3d_topic", "/det_3d_topic") + 
self.declare_parameter("camera_info_topic", "/camera_info") + self.declare_parameter("camera_frame", "/camera") + self.declare_parameter("lidar_frame", "/lidar") + + self.image_topic = self.get_parameter("image_topic").value + self.publish_viz_topic = self.get_parameter("publish_viz_topic").value + self.det_3d_topic = self.get_parameter("det_3d_topic").value + self.camera_info_topic = self.get_parameter("camera_info_topic").value + self.camera_frame = self.get_parameter("camera_frame").value + self.lidar_frame = self.get_parameter("lidar_frame").value + + # subscribes to images & 3D dets + self.unprocessed_images = deque() + self.unprocessed_dets = deque() + self.camera_info = None + self.transform = None + + self.tf_buffer = Buffer() + self.tf_listener = TransformListener(self.tf_buffer, self) + + self.cv_bridge = CvBridge() + + self.image_subscription = self.create_subscription( + Image, + self.image_topic, + self.image_callback, + qos_profile=QoSProfile( + reliability=QoSReliabilityPolicy.RELIABLE, + history=QoSHistoryPolicy.KEEP_LAST, + depth=10, + ), + ) + + self.det_3d_subscription = self.create_subscription( + Detection3DArray, + self.det_3d_topic, + self.det_3d_callback, + qos_profile=QoSProfile( + reliability=QoSReliabilityPolicy.RELIABLE, + history=QoSHistoryPolicy.KEEP_LAST, + depth=10, + ), + ) + + self.camera_info_subscription = self.create_subscription( + CameraInfo, + self.camera_info_topic, + self.camera_info_callback, + qos_profile=QoSProfile( + reliability=QoSReliabilityPolicy.RELIABLE, + history=QoSHistoryPolicy.KEEP_LAST, + depth=10, + ), + ) + + self.viz_publisher = self.create_publisher(Image, self.publish_viz_topic, 10) + + def image_callback(self, msg): + with mutex: + self.unprocessed_images.append(msg) + self.try_draw() + + def det_3d_callback(self, msg): + with mutex: + self.unprocessed_dets.append(msg) + + # get transform from lidar -> camera + if self.transform is None: + try: + self.transform = self.tf_buffer.lookup_transform( + 
self.camera_frame, + self.lidar_frame, + rclpy.time.Time()) + except TransformException as ex: + self.get_logger().info( + f'Could not transform from {self.lidar_frame} to {self.camera_frame}: {ex}') + return + + self.try_draw() + + def camera_info_callback(self, msg): + self.camera_info = np.array(msg.p).reshape(3, 4) + + self.get_logger().info(f"GOT CAMERA INFO... {self.camera_info}") + + self.destroy_subscription(self.camera_info_subscription) + + def try_draw(self): + if not self.unprocessed_images or not self.unprocessed_dets or self.transform is None or self.camera_info is None: + return + + with mutex: + image_msg = self.unprocessed_images.popleft() + det_3d_msg = self.unprocessed_dets.popleft() + + try: + image = self.cv_bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough") + except CvBridgeError as e: + self.get_logger().error(str(e)) + return + + self.get_logger().info(f"PROCESSING IMAGE + DET3D...") + + for det_msg in det_3d_msg.detections: + bbox = det_msg.bbox + + center = np.array([bbox.center.position.x, bbox.center.position.y, bbox.center.position.z]) + rot = Rotation.from_quat([bbox.center.orientation.x, bbox.center.orientation.y, bbox.center.orientation.z, bbox.center.orientation.w]) + size = np.array([bbox.size.x, bbox.size.y, bbox.size.z]) + + # get all 8 corners + vert = [ center + rot.apply(np.multiply(size , np.array([-1, 1, 1]))), + center + rot.apply(np.multiply(size , np.array([-1, -1, 1]))), + center + rot.apply(np.multiply(size , np.array([-1, -1, -1]))), + center + rot.apply(np.multiply(size , np.array([-1, 1, -1]))), + center + rot.apply(np.multiply(size , np.array([1, 1, 1]))), + center + rot.apply(np.multiply(size , np.array([1, -1, 1]))), + center + rot.apply(np.multiply(size , np.array([1, -1, -1]))), + center + rot.apply(np.multiply(size , np.array([1, 1, -1]))), + ] + + color = (randint(0, 255), randint(0, 255), randint(0, 255)) + verts_2d = [] + + # project each 3d vert to 2d + for v in vert: + # convert v into a pos2d 
message + v_msg = Pose() + v_msg.position.x = v[0] + v_msg.position.y = v[1] + v_msg.position.z = v[2] + + # lidar to camera frame + v_trans = tf2_geometry_msgs.do_transform_pose(v_msg, self.transform) + v_trans = np.array([v_trans.position.x, v_trans.position.y, v_trans.position.z, 1]) + + # project 3d camera frame to 2d camera plane + v_2d = self.camera_info @ v_trans + v_2d = np.array([int(v_2d[0] / v_2d[2]), int(v_2d[1] / v_2d[2])]) + verts_2d.append(v_2d) + + # draw vertex onto image + # image = cv2.circle(image, v_2d, 5, color, thickness=-1) + + # draw edges + for i in range(4): + image = cv2.line(image, verts_2d[i], verts_2d[(i+1) % 4], color, 10) # face 1 + image = cv2.line(image, verts_2d[i+4], verts_2d[(i+1) % 4 + 4], color, 10) # face 2 + image = cv2.line(image, verts_2d[i], verts_2d[i+4], color, 10) # connect faces + + self.publish_viz(image, image_msg) + + def publish_viz(self, cv_img, msg): + imgmsg = self.cv_bridge.cv2_to_imgmsg(cv_img, "bgr8") + imgmsg.header.stamp = msg.header.stamp + imgmsg.header.frame_id = msg.header.frame_id + self.viz_publisher.publish(imgmsg) + +def main(args=None): + rclpy.init(args=args) + + tracking_viz_node = DrawBasicDetections() + rclpy.spin(tracking_viz_node) + tracking_viz_node.destroy_node() + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/src/samples/python/aggregator/setup.py b/src/samples/python/aggregator/setup.py index b0afb9f6..f77c1804 100755 --- a/src/samples/python/aggregator/setup.py +++ b/src/samples/python/aggregator/setup.py @@ -14,7 +14,7 @@ # Include our package.xml file (os.path.join('share', package_name), ['package.xml']), # Include all launch files. - (os.path.join('share', package_name, 'launch'), \ + (os.path.join('share', package_name, 'launch'), glob(os.path.join('launch', '*.launch.py'))), ], install_requires=['setuptools'],