common/status_levels.h (3 additions & 0 deletions)
@@ -20,6 +20,9 @@ enum log_level {
};
#define LOG_LEVEL_MAX LOG_BROKEN

+ /* Things that can happen in real life, but we don't expect under CI. */
+ #define CI_UNEXPECTED "That's weird: "
+
const char *log_level_name(enum log_level level);
bool log_level_parse(const char *levelstr, size_t len,
enum log_level *level);
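For context: CI_UNEXPECTED is nothing more than a message prefix. As the connectd change below shows, a log that used to be emitted at **BROKEN** level is downgraded to "unusual" but tagged with this prefix, and the CI test fixture then greps for the tag. A minimal sketch of the idea (Python rather than the project's C, where the prefix is folded in by string-literal concatenation):

CI_UNEXPECTED = "That's weird: "

# Build the message the way the C caller does, then check the property CI relies on.
msg = CI_UNEXPECTED + "Peer did not close, forcing close"
assert msg.startswith("That's weird: ")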
connectd/multiplex.c (1 addition & 2 deletions)
@@ -106,8 +106,7 @@ static void maybe_free_peer(struct peer *peer)
* not reading, we have to give up. */
static void close_peer_io_timeout(struct peer *peer)
{
- /* BROKEN means we'll trigger CI if we see it, though it's possible */
- status_peer_broken(&peer->id, "Peer did not close, forcing close");
+ status_peer_unusual(&peer->id, CI_UNEXPECTED "Peer did not close, forcing close");
io_close(peer->to_peer);
}

contrib/pyln-testing/pyln/testing/fixtures.py (1 addition & 1 deletion)
@@ -609,7 +609,7 @@ def checkBadGossip(node):

def checkBroken(node):
node.daemon.logs_catchup()
- broken_lines = [l for l in node.daemon.logs if '**BROKEN**' in l]
+ broken_lines = [l for l in node.daemon.logs if '**BROKEN**' in l or "That's weird: " in l]
if node.broken_log:
ex = re.compile(node.broken_log)
broken_lines = [l for l in broken_lines if not ex.search(l)]
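To make the rule concrete, here is a small self-contained sketch of the filtering this hunk implements (an illustration, not the pyln-testing fixture itself): a log line is reported if it contains either marker, unless it matches the node's broken_log whitelist regex.

import re

# Sketch of checkBroken's filtering rule; names here are illustrative.
def unexpected_lines(log_lines, broken_log=None):
    suspects = [l for l in log_lines
                if '**BROKEN**' in l or "That's weird: " in l]
    if broken_log:
        ex = re.compile(broken_log)
        suspects = [l for l in suspects if not ex.search(l)]
    return suspects

# The first line is still reported; the second is whitelisted away.
logs = ["connectd: That's weird: Peer did not close, forcing close",
        "gossipd: **BROKEN** tolerated in this test"]
assert unexpected_lines(logs, broken_log=r'tolerated in this test') == [logs[0]]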
tests/test_bookkeeper.py (7 additions & 1 deletion)
@@ -318,8 +318,14 @@ def test_bookkeeping_missed_chans_leases(node_factory, bitcoind):
l1.wait_local_channel_active(scid)
channel_id = first_channel_id(l1, l2)

+ # Sigh. bookkeeper sorts events by timestamp. If the invoice event happens
+ # too close, it can change the order, so sleep here.
+ time.sleep(2)
+
# Send l2 funds via the channel
l1.pay(l2, invoice_msat)
- l1.daemon.wait_for_log(r'coin movement:.*\'invoice\'')
+ # Make sure they're completely settled, so accounting correct.
+ wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['htlcs'] == [])

# Now turn the bookkeeper on and restart
l1.stop()
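The fix above leans on pyln's wait_for helper; a rough sketch of the polling pattern it embodies (a simplified stand-in with illustrative timeout values, not pyln.testing.utils.wait_for itself):

import time

# Poll a predicate until it holds or a timeout expires.
def wait_for(success, timeout=60, interval=0.25):
    start = time.time()
    while not success():
        if time.time() - start > timeout:
            raise TimeoutError("condition did not become true in %ds" % timeout)
        time.sleep(interval)

# Used above to block until no HTLCs are in flight, so the bookkeeper sees
# fully settled balances before the restart:
#   wait_for(lambda: only_one(l1.rpc.listpeerchannels()['channels'])['htlcs'] == [])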
tests/test_coinmoves.py (7 additions & 2 deletions)
@@ -1221,6 +1221,11 @@ def test_coinmoves_unilateral_htlc_fulfill(node_factory, bitcoind):
line = l1.daemon.is_in_log('Tracking output.*/OUR_HTLC')
htlc = int(re.search(r'output [0-9a-f]{64}:([0-9]):', line).group(1))

+ # commitment tx weight can vary (DER sigs, FML) and so even though the feerate target
+ # is fixed, the amount of the child tx we create will vary, hence the change varies.
+ # So it's usually 15579000, but one in 128 times it will be 15586000...
+ anchor_change_msats = bitcoind.rpc.gettxout(anchor_spend_txid, 0)['value'] * 100_000_000_000
+
expected_chain1 += [{'account_id': 'wallet', # Anchor spend from fundchannel change
'blockheight': 104,
'credit_msat': 0,
@@ -1232,10 +1237,10 @@ def test_coinmoves_unilateral_htlc_fulfill(node_factory, bitcoind):
'utxo': f"{fundchannel['txid']}:{fundchannel['outnum'] ^ 1}"},
{'account_id': 'wallet', # Change from anchor spend
'blockheight': 104,
- 'credit_msat': 15579000,
+ 'credit_msat': anchor_change_msats,
'debit_msat': 0,
'extra_tags': [],
- 'output_msat': 15579000,
+ 'output_msat': anchor_change_msats,
'primary_tag': 'deposit',
'utxo': f"{anchor_spend_txid}:0"},
{'account_id': fundchannel['channel_id'],
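The multiplier in the anchor_change_msats line above is the BTC-to-millisatoshi conversion: gettxout reports 'value' in BTC, and 1 BTC = 100,000,000 sat = 100,000,000,000 msat. A quick standalone check of that arithmetic (assuming a Decimal BTC value, as bitcoind's RPC returns):

from decimal import Decimal

# 0.00015579 BTC is the 15579000 msat the old hard-coded value expected.
def btc_to_msat(value_btc):
    return int(Decimal(value_btc) * 100_000_000_000)

assert btc_to_msat("0.00015579") == 15_579_000
assert btc_to_msat("0.00015586") == 15_586_000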
tests/test_plugin.py (19 additions & 9 deletions)
@@ -4045,19 +4045,29 @@ def test_sql(node_factory, bitcoind):
wait_for(lambda: l3.rpc.sql("SELECT * FROM nodes WHERE alias = '{}'".format(alias))['rows'] != [])

# Test json functions
- l1.fundchannel(l2)
- bitcoind.generate_block(6)
- l1.rpc.pay(l2.rpc.invoice(amount_msat=1000000, label='inv1000', description='description 1000 msat')['bolt11'])
- ret = l1.rpc.sql("SELECT json_object('peer_id', hex(pc.peer_id), 'alias', alias, 'htlcs',"
+ scidl1l3, _ = l1.fundchannel(l3)
+ l1.rpc.pay(l3.rpc.invoice(amount_msat=1000000, label='inv1000', description='description 1000 msat')['bolt11'])
+
+ # Two channels, l1->l3 *may* have an HTLC in flight.
+ ret = l1.rpc.sql("SELECT json_object('peer_id', hex(pc.peer_id), 'alias', alias, 'scid', short_channel_id, 'htlcs',"
" (SELECT json_group_array(json_object('id', hex(id), 'amount_msat', amount_msat))"
" FROM peerchannels_htlcs ph WHERE ph.row = pc.rowid)) FROM peerchannels pc JOIN nodes n"
" ON pc.peer_id = n.nodeid ORDER BY n.alias, pc.peer_id;")
assert len(ret['rows']) == 2
- row1 = json.loads(ret['rows'][0][0])
- row2 = json.loads(ret['rows'][1][0])
- assert row1['peer_id'] == format(l2.info['id'].upper())
- assert len(row2['htlcs']) == 1
- assert row2['htlcs'][0]['amount_msat'] == 1000000
+ row1 = json.loads(only_one(ret['rows'][0]))
+ row2 = json.loads(only_one(ret['rows'][1]))
+ assert row1 in ({"peer_id": format(l3.info['id'].upper()),
+ "alias": l3.rpc.getinfo()['alias'],
+ "scid": scidl1l3,
+ "htlcs": []},
+ {"peer_id": format(l3.info['id'].upper()),
+ "alias": l3.rpc.getinfo()['alias'],
+ "scid": scidl1l3,
+ "htlcs": [{"id": "30", "amount_msat": 1000000}]})
+ assert row2 == {"peer_id": format(l2.info['id'].upper()),
+ "alias": l2.rpc.getinfo()['alias'],
+ "scid": scid,
+ "htlcs": []}


def test_sql_deprecated(node_factory, bitcoind):
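The reworked SELECT above relies on SQLite's JSON1 functions; here is a tiny standalone illustration (made-up tables, assuming an sqlite3 build with JSON1, which is standard in modern Python) of why json_group_array(json_object(...)) nests objects instead of quoting them, letting the test json.loads() each returned row:

import json
import sqlite3

db = sqlite3.connect(":memory:")
db.executescript("""
CREATE TABLE peerchannels (id INTEGER PRIMARY KEY, peer_id TEXT, alias TEXT);
CREATE TABLE htlcs (chan INTEGER, id INTEGER, amount_msat INTEGER);
INSERT INTO peerchannels VALUES (1, 'abc123', 'JUNIORBEAM');
INSERT INTO htlcs VALUES (1, 0, 1000000);
""")
(row,) = db.execute(
    "SELECT json_object('peer_id', upper(pc.peer_id), 'alias', pc.alias, 'htlcs',"
    " (SELECT json_group_array(json_object('id', h.id, 'amount_msat', h.amount_msat))"
    "  FROM htlcs h WHERE h.chan = pc.id)) FROM peerchannels pc").fetchone()
assert json.loads(row) == {'peer_id': 'ABC123', 'alias': 'JUNIORBEAM',
                           'htlcs': [{'id': 0, 'amount_msat': 1000000}]}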
tests/test_reckless.py (2 additions & 1 deletion)
@@ -2,7 +2,7 @@
import subprocess
from pathlib import PosixPath, Path
import socket
- from pyln.testing.utils import VALGRIND
+ from pyln.testing.utils import VALGRIND, SLOW_MACHINE
import pytest
import os
import re
@@ -351,6 +351,7 @@ def test_tag_install(node_factory):
header = line


+ @unittest.skipIf(VALGRIND and SLOW_MACHINE, "node too slow for starting plugin under valgrind")
def test_reckless_uv_install(node_factory):
node = get_reckless_node(node_factory)
node.start()