
Commit 2492dd3

Merge pull request #181 from StanfordASL/skillblender
Add skillblender and sms abstract
2 parents cb40450 + 823e7a7 commit 2492dd3

File tree

1 file changed (+12 −0 lines)


_bibliography/ASL_Bib.bib

Lines changed: 12 additions & 0 deletions
@@ -3578,6 +3578,17 @@ @inproceedings{KuwataPavoneEtAl2012
   url = {/wp-content/papercite-data/pdf/Kuwata.Pavone.ea.CDC12.pdf}
 }
 
+@article{KuangEtAl2025,
+  author = {Kuang, Y. and Geng, H. and Elhafsi, A. and Do, T. and Abbeel, P. and Malik, J. and Pavone, M. and Wang, Y.},
+  title = {SkillBlender: Towards Versatile Humanoid Whole-Body Loco-Manipulation via Skill Blending},
+  year = {2025},
+  abstract = {Humanoid robots hold significant potential in accomplishing daily tasks across diverse environments thanks to their flexibility and human-like morphology. Recent works have made significant progress in humanoid whole-body control and loco-manipulation leveraging optimal control or reinforcement learning. However, these methods require tedious task-specific tuning for each task to achieve satisfactory behaviors, limiting their versatility and scalability to diverse tasks in daily scenarios. To that end, we introduce SkillBlender, a novel hierarchical reinforcement learning framework for versatile humanoid loco-manipulation. SkillBlender first pretrains goal-conditioned task-agnostic primitive skills, and then dynamically blends these skills to accomplish complex loco-manipulation tasks with minimal task-specific reward engineering. We also introduce SkillBench, a parallel, cross-embodiment, and diverse simulated benchmark containing three embodiments, four primitive skills, and eight challenging loco-manipulation tasks, accompanied by a set of scientific evaluation metrics balancing accuracy and feasibility. Extensive simulated experiments show that our method significantly outperforms all baselines, while naturally regularizing behaviors to avoid reward hacking, resulting in more accurate and feasible movements for diverse loco-manipulation tasks in our daily scenarios. Our code and benchmark will be open-sourced to the community to facilitate future research.},
+  journal = {CoRL 2024 Workshop on Whole-body Control and Bimanual Manipulation},
+  url = {https://arxiv.org/abs/2506.09366},
+  owner = {amine},
+  timestamp = {2025-06-11}
+}
+
 @inproceedings{KoenigPavoneEtAl2014,
   author = {Koenig, Adam W. and Pavone, M. and Castillo-Rogez, Julie C. and Nesnas, I. A. D.},
   title = {A Dynamical Characterization of Internally-Actuated Microgravity Mobility Systems},
@@ -4536,6 +4547,7 @@ @article{ElhafsiMortonPavone2025
   title = {Scan, Materialize, Simulate: A Generalizable Framework for Physically Grounded Robot Planning},
   year = {2025},
   journal = {ArXiv 2505.14938},
+  abstract = {Autonomous robots must reason about the physical consequences of their actions to operate effectively in unstructured, real-world environments. We present Scan, Materialize, Simulate (SMS), a unified framework that combines 3D Gaussian Splatting for accurate scene reconstruction, visual foundation models for semantic segmentation, vision-language models for material property inference, and physics simulation for reliable prediction of action outcomes. By integrating these components, SMS enables generalizable physical reasoning and object-centric planning without the need to re-learn foundational physical dynamics. We empirically validate SMS in a billiards-inspired manipulation task and a challenging quadrotor landing scenario, demonstrating robust performance on both simulated domain transfer and real-world experiments. Our results highlight the potential of bridging differentiable rendering for scene reconstruction, foundation models for semantic understanding, and physics-based simulation to achieve physically grounded robot planning across diverse settings.},
   url = {https://arxiv.org/pdf/2505.14938},
   keywords = {sub},
   owner = {amine},