Commit 20d2748

Delay FC for async payments
Previously, `should_broadcast_holder_commitment_txn` would force-close a channel if an unresolved outbound HTLC was `LATENCY_GRACE_PERIOD_BLOCKS` past its expiry. For an async payment we can delay the force-closure, since we are not racing to claim an inbound HTLC. When a node has been offline for a while, this lets the HTLC be failed on reconnection instead of triggering a force-close. Here we allow an extra 4032 blocks, which is roughly four weeks.
1 parent 5e24272 commit 20d2748
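
The following is an illustrative sketch of the timing rule this commit introduces, not the actual `ChannelMonitorImpl` code: the helper function and its `latency_grace_period_blocks` parameter are made up here, standing in for LDK's `LATENCY_GRACE_PERIOD_BLOCKS`, while the 4032-block constant mirrors the one added in the diff below.

const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032; // ~4 weeks of blocks

/// Returns true if an unresolved outbound HTLC should trigger a force-close at
/// the given chain height. (Sketch only; the real check lives in
/// `should_broadcast_holder_commitment_txn`.)
fn outbound_htlc_forces_close(
    cltv_expiry: u32, height: u32, latency_grace_period_blocks: u32, is_async_payment: bool,
) -> bool {
    // Async (static-invoice) payments get a much longer grace period so a node that
    // was offline can fail the HTLC back on reconnect instead of closing the channel.
    let grace = if is_async_payment {
        ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS
    } else {
        latency_grace_period_blocks
    };
    cltv_expiry + grace <= height
}

For example, with `cltv_expiry = 800_000` a non-async outbound HTLC would trigger a close a few blocks past 800,000, while an async one waits until block 804,032. Inbound HTLCs for which we know the preimage still force a close near expiry, exactly as before; only the outbound branch gains the longer allowance.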

3 files changed: +114 -7 lines changed

lightning/src/chain/channelmonitor.rs

Lines changed: 22 additions & 7 deletions

@@ -45,7 +45,7 @@ use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::chain::Filter;
 use crate::chain::{BestBlock, WatchedOutput};
 use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
-use crate::events::{ClosureReason, Event, EventHandler, ReplayEvent};
+use crate::events::{ClosureReason, Event, EventHandler, PaidBolt12Invoice, ReplayEvent};
 use crate::ln::chan_utils::{
 	self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets,
 	HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction,
@@ -5964,9 +5964,12 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		// updates that peer sends us are update_fails, failing the channel if not. It's probably
 		// easier to just fail the channel as this case should be rare enough anyway.
 		let height = self.best_block.height;
+		// Grace period in number of blocks we allow for an async payment to resolve before we
+		// force-close. 4032 blocks is roughly four weeks.
+		const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032;
 		macro_rules! scan_commitment {
 			($htlcs: expr, $holder_tx: expr) => {
-				for ref htlc in $htlcs {
+				for (ref htlc, ref source) in $htlcs {
 					// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
 					// chain with enough room to claim the HTLC without our counterparty being able to
 					// time out the HTLC first.
@@ -5977,9 +5980,21 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 					// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
 					// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
 					// we give ourselves a few blocks of headroom after expiration before going
-					// on-chain for an expired HTLC.
+					// on-chain for an expired HTLC. In the case of an outbound HTLC for
+					// an async payment, we allow `ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS` before
+					// we force-close the channel, so that if we've been offline for a
+					// while the HTLC has a chance to be failed on reconnect instead of
+					// closing the channel.
 					let htlc_outbound = $holder_tx == htlc.offered;
-					if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+					let async_payment = htlc_outbound && matches!(
+						source.as_deref().expect("Every outbound HTLC should have a corresponding source"),
+						HTLCSource::OutboundRoute {
+							bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(_)),
+							..
+						}
+					);
+					if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height && !async_payment) ||
+						( htlc_outbound && htlc.cltv_expiry + ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS <= height && async_payment) ||
 						(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
 						log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.payment_hash, htlc.cltv_expiry);
 						return Some(htlc.payment_hash);
@@ -5988,16 +6003,16 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}
 
-		scan_commitment!(holder_commitment_htlcs!(self, CURRENT), true);
+		scan_commitment!(holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES), true);
 
 		if let Some(ref txid) = self.funding.current_counterparty_commitment_txid {
 			if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
-				scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+				scan_commitment!(htlc_outputs.iter(), false);
 			}
 		}
 		if let Some(ref txid) = self.funding.prev_counterparty_commitment_txid {
 			if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
-				scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+				scan_commitment!(htlc_outputs.iter(), false);
 			}
 		}
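
For readers less familiar with the pattern, the async-payment detection above hinges on matching the HTLC's source against a nested enum variant. The following self-contained sketch uses simplified, hypothetical stand-ins for `HTLCSource` and `PaidBolt12Invoice` (the real LDK enums carry many more variants and fields) to show the same `matches!` shape:

// Simplified, hypothetical stand-ins for the LDK types named in the diff above.
#[allow(dead_code)]
enum PaidBolt12Invoice {
    StaticInvoice(()),
    Bolt12Invoice(()),
}

#[allow(dead_code)]
enum HTLCSource {
    OutboundRoute { bolt12_invoice: Option<PaidBolt12Invoice> },
    PreviousHopData,
}

// An outbound HTLC whose source paid a static (async) invoice gets the longer
// grace period before the monitor considers force-closing.
fn is_async_payment(source: &HTLCSource) -> bool {
    matches!(
        source,
        HTLCSource::OutboundRoute {
            bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(_)),
            ..
        }
    )
}

fn main() {
    let src = HTLCSource::OutboundRoute {
        bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(())),
    };
    assert!(is_async_payment(&src));
}

In the monitor itself each HTLC's source is an `Option`, hence the `source.as_deref().expect(...)` in the diff: every outbound HTLC on a holder commitment is expected to carry one.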

lightning/src/ln/async_payments_tests.rs

Lines changed: 86 additions & 0 deletions

@@ -3031,6 +3031,92 @@ fn held_htlc_timeout() {
 	);
 }
 
+#[test]
+fn fail_held_htlc_on_reconnect() {
+	// Test that if an HTLC held by the sender's LSP fails while the async sender is offline, the
+	// HTLC is failed on reconnect instead of force-closing the channel.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+
+	let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
+	let mut sender_lsp_cfg = test_default_channel_config();
+	sender_lsp_cfg.enable_htlc_hold = true;
+	let mut invoice_server_cfg = test_default_channel_config();
+	invoice_server_cfg.accept_forwards_to_priv_channels = true;
+
+	let node_chanmgrs = create_node_chanmgrs(
+		4,
+		&node_cfgs,
+		&[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)],
+	);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+	unify_blockheight_across_nodes(&nodes);
+	let sender = &nodes[0];
+	let sender_lsp = &nodes[1];
+	let invoice_server = &nodes[2];
+	let recipient = &nodes[3];
+
+	let amt_msat = 5000;
+	let (_, peer_node_id, static_invoice_om) = build_async_offer_and_init_payment(amt_msat, &nodes);
+	let payment_hash =
+		lock_in_htlc_for_static_invoice(&static_invoice_om, peer_node_id, sender, sender_lsp);
+
+	sender_lsp.node.process_pending_htlc_forwards();
+	let (peer_id, held_htlc_om) =
+		extract_held_htlc_available_oms(sender, &[sender_lsp, invoice_server, recipient])
+			.pop()
+			.unwrap();
+	recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om);
+
+	let _ = extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]);
+
+	// Disconnect async sender <-> sender LSP
+	sender.node.peer_disconnected(sender_lsp.node.get_our_node_id());
+	sender_lsp.node.peer_disconnected(sender.node.get_our_node_id());
+
+	// Connect enough blocks to time out the HTLC
+	let chan_id = chan.0.channel_id;
+	let channel =
+		sender.node.list_channels().iter().find(|c| c.channel_id == chan_id).unwrap().clone();
+	let htlc_expiry = channel
+		.pending_outbound_htlcs
+		.iter()
+		.find(|htlc| htlc.payment_hash == payment_hash)
+		.unwrap()
+		.cltv_expiry;
+	let blocks_to_connect = htlc_expiry - sender.best_block_info().1 + 100;
+	connect_blocks(sender, blocks_to_connect);
+	connect_blocks(sender_lsp, blocks_to_connect);
+
+	sender_lsp.node.process_pending_htlc_forwards();
+	let mut evs = sender_lsp.node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), 1);
+	match evs.pop().unwrap() {
+		Event::HTLCHandlingFailed { failure_type, failure_reason, .. } => {
+			assert!(matches!(failure_type, HTLCHandlingFailureType::InvalidForward { .. }));
+			assert!(matches!(
+				failure_reason,
+				Some(HTLCHandlingFailureReason::Local {
+					reason: LocalHTLCFailureReason::ForwardExpiryBuffer
+				})
+			));
+		},
+		_ => panic!(),
+	}
+
+	// After reconnecting, check that the HTLC was failed and the channel remains open.
+	let mut reconnect_args = ReconnectArgs::new(&sender, &sender_lsp);
+	reconnect_args.pending_cell_htlc_fails.0 = 1;
+	reconnect_nodes(reconnect_args);
+
+	expect_payment_failed!(sender, payment_hash, false);
+	assert_eq!(sender.node.list_channels().len(), 1);
+	assert_eq!(sender_lsp.node.list_channels().len(), 2);
+}
+
 #[test]
 fn intercepted_hold_htlc() {
 	// Test a payment `sender --> LSP --> recipient` such that the HTLC is both a hold htlc and an
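
A note on the block arithmetic in the test above: connecting `htlc_expiry - best_block + 100` blocks puts the HTLC 100 blocks past its expiry, which is beyond the ordinary latency grace period but far short of the new 4032-block async allowance, so the sender's channel must stay open. The sketch below checks that invariant; the small latency constant is an assumption for illustration only, while the real `LATENCY_GRACE_PERIOD_BLOCKS` is defined in LDK.

const ASSUMED_LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // assumption for illustration only
const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032; // the allowance added by this commit

fn main() {
    let blocks_past_expiry: u32 = 100; // the test connects blocks up to expiry + 100
    // Beyond the ordinary grace period, so a non-async outbound HTLC would force-close...
    assert!(blocks_past_expiry >= ASSUMED_LATENCY_GRACE_PERIOD_BLOCKS);
    // ...but well inside the async allowance, so the channel stays open and the HTLC
    // can instead be failed back once the sender and its LSP reconnect.
    assert!(blocks_past_expiry < ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS);
}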

lightning/src/ln/channelmanager.rs

Lines changed: 6 additions & 0 deletions

@@ -12790,6 +12790,12 @@ where
 	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
 	/// to construct a [`BlindedMessagePath`] for the reply path.
 	///
+	/// # Note
+	///
+	/// When paying an offer to an async recipient, a failed payment attempt could end up in a
+	/// pending state if we are offline. In this case, we allow for an extended grace period
+	/// to resolve it with our peer when we come back online.
+	///
 	/// # Errors
 	///
 	/// Errors if:
