Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion exts/nav_tasks/config/extension.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.3.10"
version = "0.3.11"

# Description
title = "IsaacLab Navigation RL Tasks"
Expand Down
16 changes: 16 additions & 0 deletions exts/nav_tasks/docs/CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,22 @@
Changelog
---------


0.3.11 (2025-08-13)
~~~~~~~~~~~~~~~~~~~

Added
^^^^^

- Added ``freeze_low_level_policy`` option to :class:`nav_tasks.mdp.actions.NavigationSE2ActionCfg` to allow not freezing the low level policy.
- Added ``nan_fill_value`` option to :func:`nav_tasks.mdp.observations.camera_observations:camera_image` to allow filling NaNs with a specific value.

Changed
^^^^^^^

- Updated :func:`nav_tasks.mdp.observations.height_scan_observations:height_scan_clipped` to make clipping optional.


0.3.10 (2025-08-08)
~~~~~~~~~~~~~~~~~~~

Expand Down
4 changes: 3 additions & 1 deletion exts/nav_tasks/nav_tasks/mdp/actions/navigation_actions.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,9 @@ def __init__(self, cfg: NavigationSE2ActionCfg, env: ManagerBasedRLEnv):
# load policies
file_bytes = read_file(self.cfg.low_level_policy_file)
self.low_level_policy = torch.jit.load(file_bytes, map_location=self.device)
self.low_level_policy = torch.jit.freeze(self.low_level_policy.eval())
self.low_level_policy.eval()
if self.cfg.freeze_low_level_policy:
self.low_level_policy = torch.jit.freeze(self.low_level_policy)

# prepare joint position actions
if not isinstance(self.cfg.low_level_action, list):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,11 @@ class NavigationSE2ActionCfg(ActionTermCfg):
low_level_policy_file: str = MISSING
"""Path to the low level policy file."""

freeze_low_level_policy: bool = True
"""Whether to freeze the low level policy.

Can improve performance but will also eliminate possible functions such as `reset`."""

low_level_obs_group: str = "low_level_policy"
"""Observation group of the low level policy."""

Expand Down
12 changes: 9 additions & 3 deletions exts/nav_tasks/nav_tasks/mdp/observations/camera_observations.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,11 @@


def camera_image(
env: ManagerBasedEnv, sensor_cfg: SceneEntityCfg, data_type: str = "distance_to_image_plane", flatten: bool = False
env: ManagerBasedEnv,
sensor_cfg: SceneEntityCfg,
data_type: str = "distance_to_image_plane",
flatten: bool = False,
nan_fill_value: float | None = None,
) -> torch.Tensor:
"""Camera image Observations.

Expand All @@ -34,6 +38,7 @@ def camera_image(
sensor_cfg: The name of the sensor.
data_type: The type of data to extract from the sensor. Default is "distance_to_image_plane".
flatten: If True, the image will be flattened to 1D. Default is False.
nan_fill_value: The value to fill nan/inf values with. If None, the maximum distance of the sensor will be used.

Returns:
The image data."""
Expand All @@ -43,8 +48,9 @@ def camera_image(
img = sensor.data.output[data_type].clone()

if data_type == "distance_to_image_plane":
img[torch.isnan(img)] = sensor.cfg.max_distance
img[torch.isinf(img)] = sensor.cfg.max_distance
if nan_fill_value is None:
nan_fill_value = sensor.cfg.max_distance
img = torch.nan_to_num(img, nan=nan_fill_value, posinf=nan_fill_value, neginf=0.0)

# if type torch.uint8, convert to float and scale between 0 and 1
if img.dtype == torch.uint8:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,16 +49,15 @@ def height_scan_clipped(
env: ManagerBasedRLEnv,
sensor_cfg: SceneEntityCfg,
offset: float = 0.5,
clip_height: tuple[float, float] = (-1.0, 0.5),
clip_height: tuple[float, float] | None = (-1.0, 0.5),
) -> torch.Tensor:
"""Height scan from the given sensor w.r.t. the sensor's frame.

The provided offset (Defaults to 0.5) is subtracted from the returned values.
"""
"""Height scan from the given sensor w.r.t. the sensor's frame."""
# get the bounded height scan
height = height_scan_bounded(env, sensor_cfg, offset)
# clip to max observable height
return torch.clip(height, clip_height[0], clip_height[1])
if clip_height is not None:
# clip to max observable height
height = torch.clip(height, clip_height[0], clip_height[1])
return height


def height_scan_square(
Expand Down
Loading