2 changes: 1 addition & 1 deletion exts/nav_tasks/config/extension.toml
@@ -1,7 +1,7 @@
 [package]

 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.3.11"
+version = "0.3.12"

 # Description
 title = "IsaacLab Navigation RL Tasks"
8 changes: 8 additions & 0 deletions exts/nav_tasks/docs/CHANGELOG.rst
@@ -1,6 +1,14 @@
 Changelog
 ---------

+0.3.12 (2025-08-23)
+~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+- Fixed example RL training action scale and offset in :class:`nav_tasks.configs.env_cfg_base.ActionsCfg` for the low level action.
+

 0.3.11 (2025-08-13)
 ~~~~~~~~~~~~~~~~~~
5 changes: 4 additions & 1 deletion exts/nav_tasks/nav_tasks/configs/env_cfg_base.py
@@ -112,6 +112,9 @@ def __post_init__(self):
             self.front_zed_camera, IMAGE_SIZE_DOWNSAMPLE_FACTOR, IMAGE_SIZE_DOWNSAMPLE_FACTOR
         )

+        # turn off the self-collisions
+        self.robot.spawn.articulation_props.enabled_self_collisions = False
+

 ##
 # MDP settings
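For context on the hunk above: with enabled_self_collisions set to False, PhysX stops generating contacts between the robot's own links, which avoids spurious self-contacts interfering with the low-level locomotion policy and reduces simulation cost. Below is a minimal sketch of where this flag lives in an Isaac Lab articulation config; the config name, prim path, and USD path are placeholders, not this repo's actual robot cfg.

# Sketch under assumed names; illustrative only, not code from this PR.
import isaaclab.sim as sim_utils
from isaaclab.assets import ArticulationCfg

ROBOT_CFG = ArticulationCfg(
    prim_path="{ENV_REGEX_NS}/Robot",  # placeholder prim path
    spawn=sim_utils.UsdFileCfg(
        usd_path="path/to/robot.usd",  # placeholder asset path
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            # PhysX skips link-vs-link collision checks within this articulation
            enabled_self_collisions=False,
        ),
    ),
)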
@@ -125,7 +128,7 @@ class ActionsCfg:
     velocity_command = mdp.NavigationSE2ActionCfg(
         asset_name="robot",
         low_level_action=mdp.JointPositionActionCfg(
-            asset_name="robot", joint_names=[".*"], scale=1.0, use_default_offset=False
+            asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True
         ),
         low_level_policy_file=ISAACLAB_NUCLEUS_DIR + "/Policies/ANYmal-C/HeightScan/policy.pt",
     )
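This is the scale-and-offset fix described in the changelog: with use_default_offset=True, the low-level action is treated as a scaled delta around the robot's default joint positions instead of an absolute joint target, which matches how the pretrained ANYmal-C locomotion policy expects its actions. A hedged sketch of the computation these two parameters imply (it mirrors Isaac Lab's joint-position action term; the function and variable names are illustrative):

import torch

def process_low_level_action(
    raw_action: torch.Tensor,
    default_joint_pos: torch.Tensor,
    scale: float,
    use_default_offset: bool,
) -> torch.Tensor:
    # joint target = raw * scale + offset, where the offset is the default
    # joint pose when use_default_offset is True and zero otherwise
    offset = default_joint_pos if use_default_offset else torch.zeros_like(default_joint_pos)
    return raw_action * scale + offset

# before this change: targets = raw * 1.0 (absolute joint angles, zero offset)
# after this change:  targets = raw * 0.5 + default_joint_pos (deltas around the stand pose)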
19 changes: 17 additions & 2 deletions scripts/nav_tasks/train.py
@@ -5,10 +5,11 @@

 # This file has been adapted from https://github.com/isaac-sim/IsaacLab/blob/main/scripts/reinforcement_learning/rsl_rl/train.py

-# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
 # All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
+
 """Script to train RL agent with RSL-RL."""

 """Launch Isaac Sim Simulator first."""
@@ -29,11 +30,15 @@
 parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
 parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
 parser.add_argument("--task", type=str, default=None, help="Name of the task.")
+parser.add_argument(
+    "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point."
+)
 parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
 parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
 parser.add_argument(
     "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
 )
+parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
 # append RSL-RL cli arguments
 cli_args.add_rsl_rl_args(parser)
 # append AppLauncher cli args
@@ -81,6 +86,7 @@
 from datetime import datetime

 import isaaclab_tasks  # noqa: F401
+import omni
 from isaaclab.envs import (
     DirectMARLEnv,
     DirectMARLEnvCfg,
@@ -104,7 +110,7 @@
 torch.backends.cudnn.benchmark = False


-@hydra_task_config(args_cli.task, "rsl_rl_cfg_entry_point")
+@hydra_task_config(args_cli.task, args_cli.agent)
 def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlOnPolicyRunnerCfg):
     """Train with RSL-RL agent."""
     # override configurations with non-hydra CLI arguments
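The decorator change above works together with the new --agent argument: instead of always resolving the hardcoded "rsl_rl_cfg_entry_point", the script now resolves whichever agent entry point is passed on the command line. A hedged sketch of why this matters, assuming a task registers several agent configurations (the task id, config paths, and the extra entry-point key are hypothetical):

import gymnasium as gym

gym.register(
    id="NavTasks-ExampleNav-v0",  # hypothetical task id
    entry_point="isaaclab.envs:ManagerBasedRLEnv",
    kwargs={
        "env_cfg_entry_point": "nav_tasks.configs:NavEnvCfg",  # hypothetical
        "rsl_rl_cfg_entry_point": "nav_tasks.configs:NavPPORunnerCfg",  # default used by --agent
        "rsl_rl_teacher_cfg_entry_point": "nav_tasks.configs:TeacherPPORunnerCfg",  # hypothetical alternative
    },
)

# selectable at launch, e.g.:
#   python scripts/nav_tasks/train.py --task NavTasks-ExampleNav-v0 --agent rsl_rl_teacher_cfg_entry_point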
@@ -141,6 +147,15 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlOnPolicyRunnerCfg):
         log_dir += f"_{agent_cfg.run_name}"
     log_dir = os.path.join(log_root_path, log_dir)

+    # set the IO descriptors output directory if requested
+    if isinstance(env_cfg, ManagerBasedRLEnvCfg):
+        env_cfg.export_io_descriptors = args_cli.export_io_descriptors
+        env_cfg.io_descriptors_output_dir = log_dir
+    else:
+        omni.log.warn(
+            "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
+        )
+
     # create isaac environment
     env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
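One behavioral note on the IO-descriptor block: the else branch logs the warning for every non-manager-based environment, even when --export_io_descriptors was not passed. If that proves noisy, a variant that warns only when export was actually requested could look like the following sketch (not part of this PR):

if isinstance(env_cfg, ManagerBasedRLEnvCfg):
    env_cfg.export_io_descriptors = args_cli.export_io_descriptors
    env_cfg.io_descriptors_output_dir = log_dir
elif args_cli.export_io_descriptors:
    # warn only when descriptors were requested on an unsupported env type
    omni.log.warn(
        "IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
    )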