diff --git a/README.md b/README.md
index 832a21d..83a7937 100644
--- a/README.md
+++ b/README.md
@@ -124,7 +124,7 @@ The latest model is available to download: [[checkpoint](https://drive.google.co
This code belongs to the Robotic Systems Lab, ETH Zurich.
All rights reserved.
-**Authors: [Pascal Roth](https://github.com/pascal-roth), [Julian Nubert](https://juliannubert.com/), [Fan Yang](https://github.com/MichaelFYang), [Mayank Mittal](https://mayankm96.github.io/), [Ziqi Fan](https://github.com/fan-ziqi), and [Marco Hutter](https://rsl.ethz.ch/the-lab/people/person-detail.MTIxOTEx.TGlzdC8yNDQxLC0xNDI1MTk1NzM1.html)
+**Authors: [Pascal Roth](https://github.com/pascal-roth), [Julian Nubert](https://juliannubert.com/), [Fan Yang](https://github.com/MichaelFYang), [Mayank Mittal](https://mayankm96.github.io/), and [Marco Hutter](https://rsl.ethz.ch/the-lab/people/person-detail.MTIxOTEx.TGlzdC8yNDQxLC0xNDI1MTk1NzM1.html)
Maintainer: Pascal Roth, rothpa@ethz.ch**
The ViPlanner package has been tested under ROS Noetic on Ubuntu 20.04.
diff --git a/omniverse/extension/omni.viplanner/omni/viplanner/collectors/viewpoint_sampling.py b/omniverse/extension/omni.viplanner/omni/viplanner/collectors/viewpoint_sampling.py
index b2b72ab..896e74e 100644
--- a/omniverse/extension/omni.viplanner/omni/viplanner/collectors/viewpoint_sampling.py
+++ b/omniverse/extension/omni.viplanner/omni/viplanner/collectors/viewpoint_sampling.py
@@ -81,9 +81,10 @@ def sample_viewpoints(self, nbr_viewpoints: int, seed: int = 1) -> torch.Tensor:
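+ # NOTE: the min() below caps each batch at the remaining number of requested viewpoints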
sample_idx_select = torch.randperm(sample_idx.sum())[
: min(nbr_samples_per_point, nbr_viewpoints - sample_locations_count)
]
- sample_locations[
- sample_locations_count : sample_locations_count + sample_idx_select.shape[0]
- ] = self.terrain_analyser.samples[sample_idx][sample_idx_select, :2]
+ sample_locations[sample_locations_count : sample_locations_count + sample_idx_select.shape[0]] = (
+ self.terrain_analyser.samples[sample_idx][sample_idx_select, :2]
+ )
sample_locations_count += sample_idx_select.shape[0]
curr_point_idx += 1
# reset point index if all points are sampled
@@ -157,6 +157,9 @@ def render_viewpoints(self, samples: torch.Tensor):
# create directories
os.makedirs(os.path.join(filedir, "semantics"), exist_ok=True)
os.makedirs(os.path.join(filedir, "depth"), exist_ok=True)
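+ # NOTE: cfg.cameras maps camera names to annotator types, so "rgb" in the values means an RGB render was requested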
+ if "rgb" in self.cfg.cameras.values():
+ os.makedirs(os.path.join(filedir, "rgb"), exist_ok=True)
# save camera configurations
print(f"[INFO] Saving camera configurations to {filedir}.")
@@ -206,16 +208,19 @@ def render_viewpoints(self, samples: torch.Tensor):
# save images
for idx in range(samples_idx.shape[0]):
# semantic segmentation
- if annotator == "semantic_segmentation":
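+ # NOTE: rgb images reuse this branch; the idToLabels colorization below only applies to single-channel outputs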
+ if annotator in ("semantic_segmentation", "rgb"):
if image_data_np.shape[-1] == 1:
# get info data
info = self.scene.sensors[cam].data.info[idx][annotator]["idToLabels"]
# assign each key a color from the VIPlanner color space
info = {
- int(k): self.viplanner_sem_meta.class_color["static"]
- if v["class"] in ("BACKGROUND", "UNLABELLED")
- else self.viplanner_sem_meta.class_color[v["class"]]
+ int(k): (
+ self.viplanner_sem_meta.class_color["static"]
+ if v["class"] in ("BACKGROUND", "UNLABELLED")
+ else self.viplanner_sem_meta.class_color[v["class"]]
+ )
for k, v in info.items()
}
@@ -232,7 +236,11 @@ def render_viewpoints(self, samples: torch.Tensor):
output = image_data_np[idx]
assert cv2.imwrite(
- os.path.join(filedir, "semantics", f"{image_idx[cam_idx]}".zfill(4) + ".png"),
+ os.path.join(
+ filedir,
+ "semantics" if annotator == "semantic_segmentation" else "rgb",
+ f"{image_idx[cam_idx]}".zfill(4) + ".png"
+ ),
cv2.cvtColor(output.astype(np.uint8), cv2.COLOR_RGB2BGR),
)
# depth
diff --git a/omniverse/extension/omni.viplanner/omni/viplanner/config/carla_cfg.py b/omniverse/extension/omni.viplanner/omni/viplanner/config/carla_cfg.py
index bf32be5..9b62cb7 100644
--- a/omniverse/extension/omni.viplanner/omni/viplanner/config/carla_cfg.py
+++ b/omniverse/extension/omni.viplanner/omni/viplanner/config/carla_cfg.py
@@ -82,6 +82,8 @@ class TerrainSceneCfg(InteractiveSceneCfg):
height=480,
data_types=["distance_to_image_plane"],
)
+
+ # NOTE: remove "rgb" from data_types to render only the semantic segmentation
semantic_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/semantic_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
@@ -91,7 +93,7 @@ class TerrainSceneCfg(InteractiveSceneCfg):
),
width=1280,
height=720,
- data_types=["semantic_segmentation"],
+ data_types=["semantic_segmentation", "rgb"],
colorize_semantic_segmentation=False,
)
diff --git a/omniverse/extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py b/omniverse/extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py
index d886263..b132f92 100644
--- a/omniverse/extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py
+++ b/omniverse/extension/omni.viplanner/omni/viplanner/config/matterport_cfg.py
@@ -11,7 +11,7 @@
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
-from omni.isaac.lab.sensors import ContactSensorCfg, patterns
+from omni.isaac.lab.sensors import CameraCfg, ContactSensorCfg, patterns
from omni.isaac.lab.utils import configclass
from omni.isaac.matterport.config import MatterportImporterCfg
from omni.isaac.matterport.domains import MatterportRayCasterCfg
@@ -121,6 +121,17 @@ class TerrainSceneCfg(InteractiveSceneCfg):
debug_vis=False,
mesh_prim_paths=["${USER_PATH_TO_USD}/matterport.ply"],
)
+
+ # NOTE: comment out the following lines to render only the semantic segmentation and depth images
+ rgb_camera = CameraCfg(
+ prim_path="{ENV_REGEX_NS}/Robot/base/rgb_camera",
+ offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
+ spawn=sim_utils.PinholeCameraCfg(),
+ width=1280,
+ height=720,
+ data_types=["rgb"],
+ )
@configclass
diff --git a/omniverse/extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py b/omniverse/extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py
index 633617b..17a7683 100644
--- a/omniverse/extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py
+++ b/omniverse/extension/omni.viplanner/omni/viplanner/config/warehouse_cfg.py
@@ -80,13 +80,15 @@ class TerrainSceneCfg(InteractiveSceneCfg):
height=480,
data_types=["distance_to_image_plane"],
)
+
+ # NOTE: remove "rgb" from data_types to render only the semantic segmentation
semantic_camera = CameraCfg(
prim_path="{ENV_REGEX_NS}/Robot/base/semantic_camera",
offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(-0.5, 0.5, -0.5, 0.5)),
spawn=sim_utils.PinholeCameraCfg(),
width=1280,
height=720,
- data_types=["semantic_segmentation"],
+ data_types=["semantic_segmentation", "rgb"],
colorize_semantic_segmentation=False,
)
diff --git a/omniverse/standalone/data_collect.py b/omniverse/standalone/data_collect.py
index 8d484b5..8befbcf 100644
--- a/omniverse/standalone/data_collect.py
+++ b/omniverse/standalone/data_collect.py
@@ -97,6 +97,14 @@ def main():
"semantic_camera": "semantic_segmentation",
}
+ # adjustments if RGB images should also be rendered
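+ # matterport uses a dedicated rgb_camera; the other scenes add "rgb" to the semantic camera's data_types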
+ if args_cli.scene == "matterport" and hasattr(scene_cfg, "rgb_camera"):
+ scene_cfg.rgb_camera.prim_path = "/World/rgb_camera"
+ cfg.cameras["rgb_camera"] = "rgb"
+ elif "rgb" in scene_cfg.semantic_camera.data_types:
+ cfg.cameras["semantic_camera"] = "rgb"
+
# Load kit helper
sim_cfg = sim_utils.SimulationCfg()
sim = SimulationContext(sim_cfg)