@bshramin
Created March 4, 2023 23:09
rust-lightning: add a fourth node to the test_onchain_to_onchain_claim test
#[test]
fn test_onchain_to_onchain_claim() {
	// Test that in case of channel closure, we detect the state of the output and claim the HTLC
	// on the downstream peer's remote commitment tx.
	// The payment route is A (nodes[0]) -> B (nodes[1]) -> C (nodes[2]) -> D (nodes[3]).
	// First, have D claim an HTLC against C's latest commitment transaction on the C <-> D channel.
	// Then, broadcast C's commitment tx and D's HTLC-Success on B's chain, which should update the
	// monitor downstream on the A <-> B channel.
	// Finally, check that B will claim the HTLC output if A's latest commitment transaction
	// gets broadcast.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
	// Ensure all nodes are at the same height
	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
	connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
	// Rebalance the network a bit by relaying one payment through all the channels ...
	send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000);
	send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000);
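	// Route a 3_000_000 msat payment A -> B -> C -> D and leave it pending, then grab C's current
	// local commitment tx for the C <-> D channel (chan_3) while the HTLC is still present in it.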
	let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 3_000_000);
	let commitment_tx = get_local_commitment_txn!(nodes[2], chan_3.2);
	check_spends!(commitment_tx[0], chan_3.3);
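	// D claims the payment; this adds a monitor update and queues an update_fulfill_htlc for C.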
	nodes[3].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[3], payment_hash, 3_000_000);
	check_added_monitors!(nodes[3], 1);
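	// Fetch the commitment update D generated for C (it should contain exactly one
	// update_fulfill_htlc) without delivering it, so the preimage is not handed to C off-chain.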
	let updates = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
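	// Mine C's commitment tx on D's chain, force-closing chan_3; D's ChannelMonitor should respond
	// by broadcasting an HTLC-Success tx that claims the HTLC output with the preimage.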
	mine_transaction(&nodes[3], &commitment_tx[0]);
	check_closed_broadcast!(nodes[3], true);
	check_added_monitors!(nodes[3], 1);
	check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
	let c_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
	assert_eq!(c_txn.len(), 1);
	check_spends!(c_txn[0], commitment_tx[0]);
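	// The last witness element is the HTLC script itself; its length identifies this as a spend of
	// an accepted (received) HTLC output.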
	assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
	assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
	assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
	// Broadcast C's commitment tx and D's HTLC-Success on B's chain; B should be able to extract
	// the preimage and update the downstream monitor.
	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
	connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]});
	check_added_monitors!(nodes[1], 1);
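	// B is expected to surface two events: a ChannelClosed for the commitment tx it just saw
	// confirm, and a PaymentForwarded noting the fee earned and that the claim came from an
	// on-chain tx.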
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
			assert_eq!(fee_earned_msat, Some(1000));
			assert_eq!(prev_channel_id, Some(chan_1.2));
			assert_eq!(claim_from_onchain_tx, true);
			assert_eq!(next_channel_id, Some(chan_2.2));
		},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors!(nodes[1], 1);
	let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
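	// The three queued message events are expected to be: an error message destined for C, a
	// commitment update carrying the fulfill back to A, and a BroadcastChannelUpdate (checked
	// below for chan_2).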
	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
	match nodes_2_event {
		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
		_ => panic!("Unexpected event"),
	}
	match nodes_0_event {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
			assert!(update_add_htlcs.is_empty());
			assert!(update_fail_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_malformed_htlcs.is_empty());
			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
		},
		_ => panic!("Unexpected event"),
	};
	// Ensure that the last remaining message event is the BroadcastChannelUpdate msg for chan_2
	match msg_events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}
	// Broadcast A's commitment tx on B's chain to see if we are able to claim the inbound HTLC
	// with our HTLC-Success tx.
	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
	mine_transaction(&nodes[1], &commitment_tx[0]);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
	let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
	// ChannelMonitor: HTLC-Success tx
	assert_eq!(b_txn.len(), 1);
	check_spends!(b_txn[0], commitment_tx[0]);
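	// The witness script length matches an offered-HTLC script: B is sweeping, with the preimage,
	// the HTLC output that A offered in its commitment tx.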
	assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
	assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
	assert_eq!(b_txn[0].lock_time.0, 0); // Success tx
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
}