mirror of
https://github.com/LBRYFoundation/LBRY-Vault.git
synced 2025-09-01 09:45:18 +00:00
split lnrouter from lnbase
This commit is contained in:
parent
f6763b6084
commit
8ba63380b4
4 changed files with 539 additions and 483 deletions
481
lib/lnbase.py
481
lib/lnbase.py
|
@ -35,6 +35,7 @@ from . import constants
|
||||||
from . import transaction
|
from . import transaction
|
||||||
from .util import PrintError, bh2u, print_error, bfh, profiler, xor_bytes
|
from .util import PrintError, bh2u, print_error, bfh, profiler, xor_bytes
|
||||||
from .transaction import opcodes, Transaction
|
from .transaction import opcodes, Transaction
|
||||||
|
from .lnrouter import new_onion_packet, OnionHopsDataSingle, OnionPerHop
|
||||||
|
|
||||||
from collections import namedtuple, defaultdict
|
from collections import namedtuple, defaultdict
|
||||||
|
|
||||||
|
@ -562,8 +563,10 @@ def is_synced(network):
|
||||||
synced = server_height != 0 and network.is_up_to_date() and local_height >= server_height
|
synced = server_height != 0 and network.is_up_to_date() and local_height >= server_height
|
||||||
return synced
|
return synced
|
||||||
|
|
||||||
|
|
||||||
class Peer(PrintError):
|
class Peer(PrintError):
|
||||||
def __init__(self, host, port, pubkey, privkey, network, request_initial_sync=False):
|
|
||||||
|
def __init__(self, host, port, pubkey, privkey, network, channel_db, path_finder, request_initial_sync=False):
|
||||||
self.update_add_htlc_event = asyncio.Event()
|
self.update_add_htlc_event = asyncio.Event()
|
||||||
self.channel_update_event = asyncio.Event()
|
self.channel_update_event = asyncio.Event()
|
||||||
self.host = host
|
self.host = host
|
||||||
|
@ -571,6 +574,8 @@ class Peer(PrintError):
|
||||||
self.privkey = privkey
|
self.privkey = privkey
|
||||||
self.pubkey = pubkey
|
self.pubkey = pubkey
|
||||||
self.network = network
|
self.network = network
|
||||||
|
self.channel_db = channel_db
|
||||||
|
self.path_finder = path_finder
|
||||||
self.read_buffer = b''
|
self.read_buffer = b''
|
||||||
self.ping_time = 0
|
self.ping_time = 0
|
||||||
self.futures = ["channel_accepted",
|
self.futures = ["channel_accepted",
|
||||||
|
@ -591,10 +596,6 @@ class Peer(PrintError):
|
||||||
self.commitment_signed = defaultdict(asyncio.Future)
|
self.commitment_signed = defaultdict(asyncio.Future)
|
||||||
self.initialized = asyncio.Future()
|
self.initialized = asyncio.Future()
|
||||||
self.localfeatures = (0x08 if request_initial_sync else 0)
|
self.localfeatures = (0x08 if request_initial_sync else 0)
|
||||||
# view of the network
|
|
||||||
self.nodes = {} # received node announcements
|
|
||||||
self.channel_db = ChannelDB()
|
|
||||||
self.path_finder = LNPathFinder(self.channel_db)
|
|
||||||
self.unfulfilled_htlcs = []
|
self.unfulfilled_htlcs = []
|
||||||
|
|
||||||
def diagnostic_name(self):
|
def diagnostic_name(self):
|
||||||
|
@ -1333,476 +1334,6 @@ class Peer(PrintError):
|
||||||
self.revoke_and_ack[channel_id].set_result(payload)
|
self.revoke_and_ack[channel_id].set_result(payload)
|
||||||
|
|
||||||
|
|
||||||
class ChannelInfo(PrintError):
    """Topology record for one announced channel between two nodes."""

    def __init__(self, channel_announcement_payload):
        payload = channel_announcement_payload
        self.channel_id = payload['short_channel_id']
        self.node_id_1 = payload['node_id_1']
        self.node_id_2 = payload['node_id_2']
        assert type(self.node_id_1) is bytes
        assert type(self.node_id_2) is bytes
        # announcements carry the node ids in sorted order; rely on that here
        assert list(sorted([self.node_id_1, self.node_id_2])) == [self.node_id_1, self.node_id_2]

        self.capacity_sat = None   # unknown until the funding output is looked up
        self.policy_node1 = None   # policy for traffic leaving node_id_1
        self.policy_node2 = None   # policy for traffic leaving node_id_2

    def set_capacity(self, capacity):
        # TODO call this after looking up UTXO for funding txn on chain
        self.capacity_sat = capacity

    def on_channel_update(self, msg_payload):
        """Record the directed policy carried by a channel_update message."""
        assert self.channel_id == msg_payload['short_channel_id']
        flags = int.from_bytes(msg_payload['flags'], 'big')
        # the lowest flag bit selects which direction the update describes
        direction = flags & 1
        if direction == 0:
            self.policy_node1 = ChannelInfoDirectedPolicy(msg_payload)
        else:
            self.policy_node2 = ChannelInfoDirectedPolicy(msg_payload)
        self.print_error('channel update', binascii.hexlify(self.channel_id).decode("ascii"), flags)

    def get_policy_for_node(self, node_id):
        """Return the stored policy for node_id's side, raising on unknown node."""
        if node_id == self.node_id_1:
            return self.policy_node1
        if node_id == self.node_id_2:
            return self.policy_node2
        raise Exception('node_id {} not in channel {}'.format(node_id, self.channel_id))
|
|
||||||
|
|
||||||
|
|
||||||
class ChannelInfoDirectedPolicy:
    """Per-direction routing policy decoded from a channel_update payload.

    Every field is converted from its big-endian byte string into an int.
    """

    def __init__(self, channel_update_payload):
        payload = channel_update_payload
        self.cltv_expiry_delta = int.from_bytes(payload['cltv_expiry_delta'], "big")
        self.htlc_minimum_msat = int.from_bytes(payload['htlc_minimum_msat'], "big")
        self.fee_base_msat = int.from_bytes(payload['fee_base_msat'], "big")
        self.fee_proportional_millionths = int.from_bytes(payload['fee_proportional_millionths'], "big")
|
|
||||||
|
|
||||||
class ChannelDB(PrintError):
    """In-memory view of the channel graph built from gossip messages."""

    def __init__(self):
        self._id_to_channel_info = {}
        # node pubkey -> set of short_channel_id the node participates in
        self._channels_for_node = defaultdict(set)

    def get_channel_info(self, channel_id):
        return self._id_to_channel_info.get(channel_id, None)

    def get_channels_for_node(self, node_id):
        """Returns the set of channels that have node_id as one of the endpoints."""
        return self._channels_for_node[node_id]

    def on_channel_announcement(self, msg_payload):
        """Register a newly announced channel and index it under both endpoints."""
        short_channel_id = msg_payload['short_channel_id']
        self.print_error('channel announcement', binascii.hexlify(short_channel_id).decode("ascii"))
        channel_info = ChannelInfo(msg_payload)
        self._id_to_channel_info[short_channel_id] = channel_info
        for endpoint in (channel_info.node_id_1, channel_info.node_id_2):
            self._channels_for_node[endpoint].add(short_channel_id)

    def on_channel_update(self, msg_payload):
        """Forward a channel_update to the matching channel, if we know it."""
        short_channel_id = msg_payload['short_channel_id']
        try:
            channel_info = self._id_to_channel_info[short_channel_id]
        except KeyError:
            print("could not find", short_channel_id)
        else:
            channel_info.on_channel_update(msg_payload)

    def remove_channel(self, short_channel_id):
        """Forget a channel and drop it from both endpoints' indices."""
        try:
            channel_info = self._id_to_channel_info[short_channel_id]
        except KeyError:
            self.print_error('cannot find channel {}'.format(short_channel_id))
            return
        self._id_to_channel_info.pop(short_channel_id, None)
        for node in (channel_info.node_id_1, channel_info.node_id_2):
            # discard: the id may already be missing from the node's index
            self._channels_for_node[node].discard(short_channel_id)
|
|
||||||
|
|
||||||
|
|
||||||
class RouteEdge:
    """One hop of a payment route: reach node_id by travelling short_channel_id."""

    def __init__(self, node_id: bytes, short_channel_id: bytes,
                 channel_policy: ChannelInfoDirectedPolicy):
        self.node_id = node_id
        self.short_channel_id = short_channel_id
        self.channel_policy = channel_policy
|
|
||||||
|
|
||||||
|
|
||||||
class LNPathFinder(PrintError):
    """Shortest-path search (Dijkstra) over the gossiped channel graph."""

    def __init__(self, channel_db):
        self.channel_db = channel_db

    def _edge_cost(self, short_channel_id: bytes, start_node: bytes, payment_amt_msat: int) -> float:
        """Heuristic cost of going through a channel.
        direction: 0 or 1. --- 0 means node_id_1 -> node_id_2
        """
        channel_info = self.channel_db.get_channel_info(short_channel_id)
        if channel_info is None:
            return float('inf')
        channel_policy = channel_info.get_policy_for_node(start_node)
        if channel_policy is None:
            return float('inf')
        if payment_amt_msat is not None:
            if payment_amt_msat < channel_policy.htlc_minimum_msat:
                return float('inf')  # payment amount too little
            if channel_info.capacity_sat is not None \
                    and payment_amt_msat // 1000 > channel_info.capacity_sat:
                return float('inf')  # payment amount too large
        amt = payment_amt_msat or 50000 * 1000  # guess for typical payment amount
        fee_msat = channel_policy.fee_base_msat + amt * channel_policy.fee_proportional_millionths / 1000000
        # TODO revise
        # paying 10 more satoshis ~ waiting one more block
        fee_cost = fee_msat / 1000 / 10
        cltv_cost = channel_policy.cltv_expiry_delta
        return cltv_cost + fee_cost + 1

    @profiler
    def find_path_for_payment(self, from_node_id: bytes, to_node_id: bytes,
                              amount_msat: int=None) -> Sequence[Tuple[bytes, bytes]]:
        """Return a path between from_node_id and to_node_id.

        Returns a list of (node_id, short_channel_id) representing a path.
        To get from node ret[n][0] to ret[n+1][0], use channel ret[n+1][1];
        i.e. an element reads as, "to get to node_id, travel through short_channel_id"
        """
        if amount_msat is not None:
            assert type(amount_msat) is int
        # TODO find multiple paths??

        # Dijkstra with a lazy-deletion priority queue
        dist = defaultdict(lambda: float('inf'))
        dist[from_node_id] = 0
        prev_node = {}
        unexplored = queue.PriorityQueue()
        unexplored.put((0, from_node_id))

        found = False
        while unexplored.qsize() > 0:
            cur_dist, cur_node = unexplored.get()
            if cur_node == to_node_id:
                found = True
                break
            if cur_dist != dist[cur_node]:
                # queue.PriorityQueue has no decrease_priority: improved nodes
                # get re-inserted, so stale (duplicate) entries are skipped here
                continue
            for edge_channel_id in self.channel_db.get_channels_for_node(cur_node):
                channel_info = self.channel_db.get_channel_info(edge_channel_id)
                node1, node2 = channel_info.node_id_1, channel_info.node_id_2
                neighbour = node2 if node1 == cur_node else node1
                alt_dist = dist[cur_node] + self._edge_cost(edge_channel_id, cur_node, amount_msat)
                if alt_dist < dist[neighbour]:
                    dist[neighbour] = alt_dist
                    prev_node[neighbour] = cur_node, edge_channel_id
                    unexplored.put((alt_dist, neighbour))
        if not found:
            return None  # no path found

        # backtrack from end to start
        path = []
        cur_node = to_node_id
        while cur_node != from_node_id:
            prev_node_id, edge_taken = prev_node[cur_node]
            path.append((cur_node, edge_taken))
            cur_node = prev_node_id
        path.reverse()
        return path

    def create_route_from_path(self, path, from_node_id: bytes) -> Sequence[RouteEdge]:
        """Attach the per-direction channel policies to a found path."""
        assert type(from_node_id) is bytes
        if path is None:
            raise Exception('cannot create route from None path')
        route = []
        prev_node_id = from_node_id
        for node_id, short_channel_id in path:
            channel_info = self.channel_db.get_channel_info(short_channel_id)
            if channel_info is None:
                raise Exception('cannot find channel info for short_channel_id: {}'.format(bh2u(short_channel_id)))
            channel_policy = channel_info.get_policy_for_node(prev_node_id)
            if channel_policy is None:
                raise Exception('cannot find channel policy for short_channel_id: {}'.format(bh2u(short_channel_id)))
            route.append(RouteEdge(node_id, short_channel_id, channel_policy))
            prev_node_id = node_id
        return route
|
|
||||||
|
|
||||||
|
|
||||||
# bolt 04, "onion" ----->

NUM_MAX_HOPS_IN_PATH = 20
HOPS_DATA_SIZE = 1300   # also sometimes called routingInfoSize in bolt-04
PER_HOP_FULL_SIZE = 65  # HOPS_DATA_SIZE / 20
NUM_STREAM_BYTES = HOPS_DATA_SIZE + PER_HOP_FULL_SIZE
PER_HOP_HMAC_SIZE = 32


class UnsupportedOnionPacketVersion(Exception): pass
class InvalidOnionMac(Exception): pass
|
|
||||||
|
|
||||||
|
|
||||||
class OnionPerHop:
    """The 32-byte per_hop field of a realm-0 BOLT-04 hop payload."""

    def __init__(self, short_channel_id: bytes, amt_to_forward: bytes, outgoing_cltv_value: bytes):
        self.short_channel_id = short_channel_id        # 8 bytes
        self.amt_to_forward = amt_to_forward            # 8 bytes
        self.outgoing_cltv_value = outgoing_cltv_value  # 4 bytes

    def to_bytes(self) -> bytes:
        """Serialize to exactly 32 bytes; the last 12 bytes are zero padding."""
        ret = b''.join((
            self.short_channel_id,
            self.amt_to_forward,
            self.outgoing_cltv_value,
            bytes(12),  # padding
        ))
        if len(ret) != 32:
            raise Exception('unexpected length {}'.format(len(ret)))
        return ret

    @classmethod
    def from_bytes(cls, b: bytes):
        if len(b) != 32:
            raise Exception('unexpected length {}'.format(len(b)))
        return OnionPerHop(
            short_channel_id=b[:8],
            amt_to_forward=b[8:16],
            outgoing_cltv_value=b[16:20],
        )
|
|
||||||
|
|
||||||
|
|
||||||
class OnionHopsDataSingle:  # called HopData in lnd
    """One 65-byte hop slot: realm byte + per_hop payload + per-hop HMAC."""

    def __init__(self, per_hop: OnionPerHop = None):
        self.realm = 0       # only realm 0 is supported (see from_bytes)
        self.per_hop = per_hop
        self.hmac = None     # filled in while the onion is being wrapped

    def to_bytes(self) -> bytes:
        ret = bytes([self.realm]) + self.per_hop.to_bytes()
        # an unset hmac serializes as all zeros
        ret += self.hmac if self.hmac is not None else bytes(PER_HOP_HMAC_SIZE)
        if len(ret) != PER_HOP_FULL_SIZE:
            raise Exception('unexpected length {}'.format(len(ret)))
        return ret

    @classmethod
    def from_bytes(cls, b: bytes):
        if len(b) != PER_HOP_FULL_SIZE:
            raise Exception('unexpected length {}'.format(len(b)))
        ret = OnionHopsDataSingle()
        ret.realm = b[0]
        if ret.realm != 0:
            raise Exception('only realm 0 is supported')
        ret.per_hop = OnionPerHop.from_bytes(b[1:33])
        ret.hmac = b[33:]
        return ret
|
|
||||||
|
|
||||||
|
|
||||||
class OnionPacket:
    """Full 1366-byte BOLT-04 onion: version || public_key || hops_data || hmac."""

    def __init__(self, public_key: bytes, hops_data: bytes, hmac: bytes):
        self.version = 0
        self.public_key = public_key  # 33-byte ephemeral session pubkey
        self.hops_data = hops_data    # also called RoutingInfo in bolt-04
        self.hmac = hmac

    def to_bytes(self) -> bytes:
        ret = bytes([self.version]) + self.public_key + self.hops_data + self.hmac
        if len(ret) != 1366:
            raise Exception('unexpected length {}'.format(len(ret)))
        return ret

    @classmethod
    def from_bytes(cls, b: bytes):
        if len(b) != 1366:
            raise Exception('unexpected length {}'.format(len(b)))
        version = b[0]
        if version != 0:
            raise UnsupportedOnionPacketVersion('version {} is not supported'.format(version))
        return OnionPacket(
            public_key=b[1:34],
            hops_data=b[34:1334],
            hmac=b[1334:],
        )
|
|
||||||
|
|
||||||
|
|
||||||
def get_bolt04_onion_key(key_type: bytes, secret: bytes) -> bytes:
    """Derive one of the BOLT-04 onion keys (rho/mu/um/ammag) via HMAC-SHA256."""
    if key_type not in (b'rho', b'mu', b'um', b'ammag'):
        raise Exception('invalid key_type {}'.format(key_type))
    return hmac.new(key_type, msg=secret, digestmod=hashlib.sha256).digest()
|
|
||||||
|
|
||||||
|
|
||||||
def get_shared_secrets_along_route(payment_path_pubkeys: Sequence[bytes],
                                   session_key: bytes) -> Sequence[bytes]:
    """Derive the per-hop ECDH shared secrets for a payment path (BOLT-04).

    The ephemeral key is re-blinded after each hop so every hop sees a
    different ephemeral pubkey.
    """
    secrets = []
    ephemeral_key = session_key
    # compute shared key for each hop
    for hop_pubkey in payment_path_pubkeys:
        shared = get_ecdh(ephemeral_key, hop_pubkey)
        secrets.append(shared)
        # next ephemeral key = current * SHA256(ephemeral_pubkey || shared)  (mod n)
        ephemeral_pubkey = ecc.ECPrivkey(ephemeral_key).get_public_key_bytes()
        blinding_int = int.from_bytes(sha256(ephemeral_pubkey + shared), byteorder="big")
        key_int = int.from_bytes(ephemeral_key, byteorder="big")
        ephemeral_key = (key_int * blinding_int % SECP256k1.order).to_bytes(32, byteorder="big")
    return secrets
|
|
||||||
|
|
||||||
|
|
||||||
def new_onion_packet(payment_path_pubkeys: Sequence[bytes], session_key: bytes,
                     hops_data: Sequence[OnionHopsDataSingle], associated_data: bytes) -> OnionPacket:
    """Build a BOLT-04 onion for the given path, wrapping from the last hop inward."""
    num_hops = len(payment_path_pubkeys)
    hop_shared_secrets = get_shared_secrets_along_route(payment_path_pubkeys, session_key)

    filler = generate_filler(b'rho', num_hops, PER_HOP_FULL_SIZE, hop_shared_secrets)
    mix_header = bytes(HOPS_DATA_SIZE)
    next_hmac = bytes(PER_HOP_HMAC_SIZE)

    # compute routing info and MAC for each hop, innermost (last) hop first
    for i in reversed(range(num_hops)):
        rho_key = get_bolt04_onion_key(b'rho', hop_shared_secrets[i])
        mu_key = get_bolt04_onion_key(b'mu', hop_shared_secrets[i])
        hops_data[i].hmac = next_hmac
        stream_bytes = generate_cipher_stream(rho_key, NUM_STREAM_BYTES)
        # shift the header right by one hop slot, prepend this hop, obfuscate
        mix_header = hops_data[i].to_bytes() + mix_header[:-PER_HOP_FULL_SIZE]
        mix_header = xor_bytes(mix_header, stream_bytes)
        if i == num_hops - 1 and len(filler) != 0:
            # the outermost wrap replaces the tail with the precomputed filler
            mix_header = mix_header[:-len(filler)] + filler
        next_hmac = hmac.new(mu_key, msg=mix_header + associated_data,
                             digestmod=hashlib.sha256).digest()

    return OnionPacket(
        public_key=ecc.ECPrivkey(session_key).get_public_key_bytes(),
        hops_data=mix_header,
        hmac=next_hmac)
|
|
||||||
|
|
||||||
|
|
||||||
def generate_filler(key_type: bytes, num_hops: int, hop_size: int,
                    shared_secrets: Sequence[bytes]) -> bytes:
    """Generate the BOLT-04 filler compensating for the header shift at each hop."""
    filler_size = (NUM_MAX_HOPS_IN_PATH + 1) * hop_size
    filler = bytearray(filler_size)

    # -1: the last hop does not obfuscate
    for i in range(num_hops - 1):
        # drop one hop slot at the front, zero-pad at the back, then obfuscate
        filler = filler[hop_size:] + bytearray(hop_size)
        stream_key = get_bolt04_onion_key(key_type, shared_secrets[i])
        stream_bytes = generate_cipher_stream(stream_key, filler_size)
        filler = xor_bytes(filler, stream_bytes)

    return filler[(NUM_MAX_HOPS_IN_PATH - num_hops + 2) * hop_size:]
|
|
||||||
|
|
||||||
|
|
||||||
def generate_cipher_stream(stream_key: bytes, num_bytes: int) -> bytes:
    """Return num_bytes of ChaCha20 keystream under stream_key (all-zero nonce)."""
    algo = algorithms.ChaCha20(stream_key, nonce=bytes(16))
    encryptor = Cipher(algo, mode=None, backend=default_backend()).encryptor()
    # encrypting zeros yields the raw keystream
    return encryptor.update(bytes(num_bytes))
|
|
||||||
|
|
||||||
|
|
||||||
ProcessedOnionPacket = namedtuple("ProcessedOnionPacket", ["are_we_final", "hop_data", "next_packet"])


# TODO replay protection
def process_onion_packet(onion_packet: OnionPacket, associated_data: bytes,
                         our_onion_private_key: bytes) -> ProcessedOnionPacket:
    """Peel one layer off an onion addressed to us.

    Raises InvalidOnionMac if the packet's HMAC does not verify.
    """
    shared_secret = get_ecdh(our_onion_private_key, onion_packet.public_key)

    # check message integrity
    mu_key = get_bolt04_onion_key(b'mu', shared_secret)
    calculated_mac = hmac.new(mu_key, msg=onion_packet.hops_data + associated_data,
                              digestmod=hashlib.sha256).digest()
    if onion_packet.hmac != calculated_mac:
        raise InvalidOnionMac()

    # peel an onion layer off: pad with one zero hop slot, then de-obfuscate
    rho_key = get_bolt04_onion_key(b'rho', shared_secret)
    stream_bytes = generate_cipher_stream(rho_key, NUM_STREAM_BYTES)
    next_hops_data = xor_bytes(onion_packet.hops_data + bytes(PER_HOP_FULL_SIZE), stream_bytes)

    # calc next ephemeral key: blind the current one the same way the sender did
    blinding_factor = sha256(onion_packet.public_key + shared_secret)
    blinding_factor_int = int.from_bytes(blinding_factor, byteorder="big")
    next_public_key = (ecc.ECPubkey(onion_packet.public_key) * blinding_factor_int).get_public_key_bytes()

    hop_data = OnionHopsDataSingle.from_bytes(next_hops_data[:PER_HOP_FULL_SIZE])
    next_onion_packet = OnionPacket(
        public_key=next_public_key,
        hops_data=next_hops_data[PER_HOP_FULL_SIZE:],
        hmac=hop_data.hmac,
    )
    # an all-zero per-hop hmac marks us as the destination / exit node;
    # otherwise we are an intermediate node and should forward
    are_we_final = hop_data.hmac == bytes(PER_HOP_HMAC_SIZE)
    return ProcessedOnionPacket(are_we_final, hop_data, next_onion_packet)
|
|
||||||
|
|
||||||
|
|
||||||
class FailedToDecodeOnionError(Exception): pass


class OnionRoutingFailureMessage:
    """Decoded BOLT-04 failure: a numeric failure code plus code-specific data."""

    def __init__(self, code: int, data: bytes):
        self.code = code
        self.data = data

    def __repr__(self):
        return repr((self.code, self.data))
|
|
||||||
|
|
||||||
|
|
||||||
def _decode_onion_error(error_packet: bytes, payment_path_pubkeys: Sequence[bytes],
                        session_key: bytes) -> (bytes, int):
    """Returns the decoded error bytes, and the index of the sender of the error."""
    hop_shared_secrets = get_shared_secrets_along_route(payment_path_pubkeys, session_key)
    # each hop on the return trip wrapped the error with its ammag stream;
    # unwrap hop by hop until the embedded um-keyed hmac verifies
    for i in range(len(payment_path_pubkeys)):
        ammag_key = get_bolt04_onion_key(b'ammag', hop_shared_secrets[i])
        um_key = get_bolt04_onion_key(b'um', hop_shared_secrets[i])
        stream_bytes = generate_cipher_stream(ammag_key, len(error_packet))
        error_packet = xor_bytes(error_packet, stream_bytes)
        hmac_computed = hmac.new(um_key, msg=error_packet[32:], digestmod=hashlib.sha256).digest()
        if hmac_computed == error_packet[:32]:
            return error_packet, i
    raise FailedToDecodeOnionError()
|
|
||||||
|
|
||||||
|
|
||||||
def decode_onion_error(error_packet: bytes, payment_path_pubkeys: Sequence[bytes],
                       session_key: bytes) -> (OnionRoutingFailureMessage, int):
    """Returns the failure message, and the index of the sender of the error."""
    decrypted_error, sender_index = _decode_onion_error(
        error_packet, payment_path_pubkeys, session_key)
    return get_failure_msg_from_onion_error(decrypted_error), sender_index
|
|
||||||
|
|
||||||
|
|
||||||
def get_failure_msg_from_onion_error(decrypted_error_packet: bytes) -> OnionRoutingFailureMessage:
    """Parse the failure code/data out of a fully unwrapped error packet.

    Layout: 32-byte hmac || 2-byte failure_len || failure_msg (code || data) || padding.
    """
    failure_len = int.from_bytes(decrypted_error_packet[32:34], byteorder='big')
    failure_msg = decrypted_error_packet[34:34 + failure_len]
    failure_code = int.from_bytes(failure_msg[:2], byteorder='big')
    return OnionRoutingFailureMessage(failure_code, failure_msg[2:])
|
|
||||||
|
|
||||||
|
|
||||||
# <----- bolt 04, "onion"
|
|
||||||
|
|
||||||
|
|
||||||
def count_trailing_zeros(index):
|
def count_trailing_zeros(index):
|
||||||
""" BOLT-03 (where_to_put_secret) """
|
""" BOLT-03 (where_to_put_secret) """
|
||||||
try:
|
try:
|
||||||
|
|
517
lib/lnrouter.py
Normal file
517
lib/lnrouter.py
Normal file
|
@ -0,0 +1,517 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Electrum - lightweight Bitcoin client
|
||||||
|
# Copyright (C) 2018 The Electrum developers
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person
|
||||||
|
# obtaining a copy of this software and associated documentation files
|
||||||
|
# (the "Software"), to deal in the Software without restriction,
|
||||||
|
# including without limitation the rights to use, copy, modify, merge,
|
||||||
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
||||||
|
# and to permit persons to whom the Software is furnished to do so,
|
||||||
|
# subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be
|
||||||
|
# included in all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||||
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||||
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
# SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
import queue
|
||||||
|
import traceback
|
||||||
|
import sys
|
||||||
|
import binascii
|
||||||
|
import hashlib
|
||||||
|
import hmac
|
||||||
|
from collections import namedtuple, defaultdict
|
||||||
|
from typing import Sequence, Union, Tuple
|
||||||
|
|
||||||
|
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
|
||||||
|
from cryptography.hazmat.backends import default_backend
|
||||||
|
|
||||||
|
from . import bitcoin
|
||||||
|
from . import ecc
|
||||||
|
from . import crypto
|
||||||
|
from .crypto import sha256
|
||||||
|
from .util import PrintError, bh2u, print_error, bfh, profiler, xor_bytes
|
||||||
|
from . import lnbase
|
||||||
|
|
||||||
|
|
||||||
|
class ChannelInfo(PrintError):
    """Topology record for one announced channel between two nodes."""

    def __init__(self, channel_announcement_payload):
        payload = channel_announcement_payload
        self.channel_id = payload['short_channel_id']
        self.node_id_1 = payload['node_id_1']
        self.node_id_2 = payload['node_id_2']
        assert type(self.node_id_1) is bytes
        assert type(self.node_id_2) is bytes
        # announcements carry the node ids in sorted order; rely on that here
        assert list(sorted([self.node_id_1, self.node_id_2])) == [self.node_id_1, self.node_id_2]

        self.capacity_sat = None   # unknown until the funding output is looked up
        self.policy_node1 = None   # policy for traffic leaving node_id_1
        self.policy_node2 = None   # policy for traffic leaving node_id_2

    def set_capacity(self, capacity):
        # TODO call this after looking up UTXO for funding txn on chain
        self.capacity_sat = capacity

    def on_channel_update(self, msg_payload):
        """Record the directed policy carried by a channel_update message."""
        assert self.channel_id == msg_payload['short_channel_id']
        flags = int.from_bytes(msg_payload['flags'], 'big')
        # the lowest flag bit selects which direction the update describes
        direction = flags & 1
        if direction == 0:
            self.policy_node1 = ChannelInfoDirectedPolicy(msg_payload)
        else:
            self.policy_node2 = ChannelInfoDirectedPolicy(msg_payload)
        self.print_error('channel update', binascii.hexlify(self.channel_id).decode("ascii"), flags)

    def get_policy_for_node(self, node_id):
        """Return the stored policy for node_id's side, raising on unknown node."""
        if node_id == self.node_id_1:
            return self.policy_node1
        if node_id == self.node_id_2:
            return self.policy_node2
        raise Exception('node_id {} not in channel {}'.format(node_id, self.channel_id))
|
||||||
|
|
||||||
|
|
||||||
|
class ChannelInfoDirectedPolicy:
    """Per-direction routing policy decoded from a channel_update payload.

    Every field is converted from its big-endian byte string into an int.
    """

    def __init__(self, channel_update_payload):
        payload = channel_update_payload
        self.cltv_expiry_delta = int.from_bytes(payload['cltv_expiry_delta'], "big")
        self.htlc_minimum_msat = int.from_bytes(payload['htlc_minimum_msat'], "big")
        self.fee_base_msat = int.from_bytes(payload['fee_base_msat'], "big")
        self.fee_proportional_millionths = int.from_bytes(payload['fee_proportional_millionths'], "big")
|
||||||
|
|
||||||
|
|
||||||
|
class ChannelDB(PrintError):
    """In-memory view of the channel graph built from gossip messages."""

    def __init__(self):
        self._id_to_channel_info = {}
        # node pubkey -> set of short_channel_id the node participates in
        self._channels_for_node = defaultdict(set)

    def get_channel_info(self, channel_id):
        return self._id_to_channel_info.get(channel_id, None)

    def get_channels_for_node(self, node_id):
        """Returns the set of channels that have node_id as one of the endpoints."""
        return self._channels_for_node[node_id]

    def on_channel_announcement(self, msg_payload):
        """Register a newly announced channel and index it under both endpoints."""
        short_channel_id = msg_payload['short_channel_id']
        self.print_error('channel announcement', binascii.hexlify(short_channel_id).decode("ascii"))
        channel_info = ChannelInfo(msg_payload)
        self._id_to_channel_info[short_channel_id] = channel_info
        for endpoint in (channel_info.node_id_1, channel_info.node_id_2):
            self._channels_for_node[endpoint].add(short_channel_id)

    def on_channel_update(self, msg_payload):
        """Forward a channel_update to the matching channel, if we know it."""
        short_channel_id = msg_payload['short_channel_id']
        try:
            channel_info = self._id_to_channel_info[short_channel_id]
        except KeyError:
            print("could not find", short_channel_id)
        else:
            channel_info.on_channel_update(msg_payload)

    def remove_channel(self, short_channel_id):
        """Forget a channel and drop it from both endpoints' indices."""
        try:
            channel_info = self._id_to_channel_info[short_channel_id]
        except KeyError:
            self.print_error('cannot find channel {}'.format(short_channel_id))
            return
        self._id_to_channel_info.pop(short_channel_id, None)
        for node in (channel_info.node_id_1, channel_info.node_id_2):
            # discard: the id may already be missing from the node's index
            self._channels_for_node[node].discard(short_channel_id)
|
||||||
|
|
||||||
|
|
||||||
|
class RouteEdge:
    """One hop of a payment route: reach node_id by travelling short_channel_id."""

    def __init__(self, node_id: bytes, short_channel_id: bytes,
                 channel_policy: ChannelInfoDirectedPolicy):
        self.node_id = node_id
        self.short_channel_id = short_channel_id
        self.channel_policy = channel_policy
|
||||||
|
|
||||||
|
|
||||||
|
class LNPathFinder(PrintError):
    """Finds payment paths through the channel graph held by a ChannelDB."""

    def __init__(self, channel_db):
        self.channel_db = channel_db

    def _edge_cost(self, short_channel_id: bytes, start_node: bytes, payment_amt_msat: int) -> float:
        """Heuristic cost of going through a channel.
        direction: 0 or 1. --- 0 means node_id_1 -> node_id_2

        Returns float('inf') for unusable edges (unknown channel, missing
        policy, amount below htlc_minimum or above channel capacity).
        """
        channel_info = self.channel_db.get_channel_info(short_channel_id)
        if channel_info is None:
            return float('inf')

        channel_policy = channel_info.get_policy_for_node(start_node)
        if channel_policy is None: return float('inf')
        cltv_expiry_delta = channel_policy.cltv_expiry_delta
        htlc_minimum_msat = channel_policy.htlc_minimum_msat
        fee_base_msat = channel_policy.fee_base_msat
        fee_proportional_millionths = channel_policy.fee_proportional_millionths
        if payment_amt_msat is not None:
            if payment_amt_msat < htlc_minimum_msat:
                return float('inf')  # payment amount too little
            if channel_info.capacity_sat is not None and \
                    payment_amt_msat // 1000 > channel_info.capacity_sat:
                return float('inf')  # payment amount too large
        amt = payment_amt_msat or 50000 * 1000  # guess for typical payment amount
        fee_msat = fee_base_msat + amt * fee_proportional_millionths / 1000000
        # TODO revise
        # paying 10 more satoshis ~ waiting one more block
        fee_cost = fee_msat / 1000 / 10
        cltv_cost = cltv_expiry_delta
        # +1 so that even a free, zero-delay hop has nonzero cost (prefers shorter paths)
        return cltv_cost + fee_cost + 1

    @profiler
    def find_path_for_payment(self, from_node_id: bytes, to_node_id: bytes,
                              amount_msat: int=None) -> Sequence[Tuple[bytes, bytes]]:
        """Return a path between from_node_id and to_node_id.

        Returns a list of (node_id, short_channel_id) representing a path.
        To get from node ret[n][0] to ret[n+1][0], use channel ret[n+1][1];
        i.e. an element reads as, "to get to node_id, travel through short_channel_id"

        Returns None if no path is found.
        """
        if amount_msat is not None: assert type(amount_msat) is int
        # TODO find multiple paths??

        # run Dijkstra
        distance_from_start = defaultdict(lambda: float('inf'))
        distance_from_start[from_node_id] = 0
        prev_node = {}  # node -> (previous node, channel taken to get here)
        nodes_to_explore = queue.PriorityQueue()
        nodes_to_explore.put((0, from_node_id))

        while nodes_to_explore.qsize() > 0:
            dist_to_cur_node, cur_node = nodes_to_explore.get()
            if cur_node == to_node_id:
                break
            if dist_to_cur_node != distance_from_start[cur_node]:
                # queue.PriorityQueue does not implement decrease_priority,
                # so instead of decreasing priorities, we add items again into the queue.
                # so there are duplicates in the queue, that we discard now:
                continue
            for edge_channel_id in self.channel_db.get_channels_for_node(cur_node):
                channel_info = self.channel_db.get_channel_info(edge_channel_id)
                node1, node2 = channel_info.node_id_1, channel_info.node_id_2
                neighbour = node2 if node1 == cur_node else node1
                alt_dist_to_neighbour = distance_from_start[cur_node] \
                                        + self._edge_cost(edge_channel_id, cur_node, amount_msat)
                if alt_dist_to_neighbour < distance_from_start[neighbour]:
                    distance_from_start[neighbour] = alt_dist_to_neighbour
                    prev_node[neighbour] = cur_node, edge_channel_id
                    nodes_to_explore.put((alt_dist_to_neighbour, neighbour))
        else:
            # while/else: loop exhausted the queue without ever breaking,
            # i.e. the destination was never reached
            return None  # no path found

        # backtrack from end to start
        cur_node = to_node_id
        path = []
        while cur_node != from_node_id:
            prev_node_id, edge_taken = prev_node[cur_node]
            path += [(cur_node, edge_taken)]
            cur_node = prev_node_id
        path.reverse()
        return path

    def create_route_from_path(self, path, from_node_id: bytes) -> Sequence[RouteEdge]:
        """Convert a (node_id, short_channel_id) path into RouteEdges.

        For each hop, attaches the channel policy of the *sending* node
        (prev_node_id). Raises if the path is None or if channel info /
        policy is missing for any hop.
        """
        assert type(from_node_id) is bytes
        if path is None:
            raise Exception('cannot create route from None path')
        route = []
        prev_node_id = from_node_id
        for node_id, short_channel_id in path:
            channel_info = self.channel_db.get_channel_info(short_channel_id)
            if channel_info is None:
                raise Exception('cannot find channel info for short_channel_id: {}'.format(bh2u(short_channel_id)))
            channel_policy = channel_info.get_policy_for_node(prev_node_id)
            if channel_policy is None:
                raise Exception('cannot find channel policy for short_channel_id: {}'.format(bh2u(short_channel_id)))
            route.append(RouteEdge(node_id, short_channel_id, channel_policy))
            prev_node_id = node_id
        return route
||||||
|
# bolt 04, "onion" ----->

NUM_MAX_HOPS_IN_PATH = 20
HOPS_DATA_SIZE = 1300  # also sometimes called routingInfoSize in bolt-04
PER_HOP_FULL_SIZE = 65  # HOPS_DATA_SIZE / 20
# keystream length needed per hop: hops_data plus one extra hop of padding
NUM_STREAM_BYTES = HOPS_DATA_SIZE + PER_HOP_FULL_SIZE
PER_HOP_HMAC_SIZE = 32
||||||
|
# raised when parsing an OnionPacket whose version byte is not 0
class UnsupportedOnionPacketVersion(Exception): pass
# raised when an onion packet's HMAC does not verify
class InvalidOnionMac(Exception): pass
||||||
|
class OnionPerHop:
    """The realm-0 per_hop payload of one onion hop (bolt-04).

    Fixed 32-byte wire layout:
    short_channel_id (8) || amt_to_forward (8) || outgoing_cltv_value (4) || zero padding (12)
    """

    def __init__(self, short_channel_id: bytes, amt_to_forward: bytes, outgoing_cltv_value: bytes):
        self.short_channel_id = short_channel_id
        self.amt_to_forward = amt_to_forward
        self.outgoing_cltv_value = outgoing_cltv_value

    def to_bytes(self) -> bytes:
        """Serialize to the fixed 32-byte wire format."""
        serialized = b''.join((
            self.short_channel_id,
            self.amt_to_forward,
            self.outgoing_cltv_value,
            bytes(12),  # padding
        ))
        if len(serialized) != 32:
            raise Exception('unexpected length {}'.format(len(serialized)))
        return serialized

    @classmethod
    def from_bytes(cls, b: bytes):
        """Parse the 32-byte wire format; the 12 padding bytes are discarded."""
        if len(b) != 32:
            raise Exception('unexpected length {}'.format(len(b)))
        return OnionPerHop(
            short_channel_id=b[:8],
            amt_to_forward=b[8:16],
            outgoing_cltv_value=b[16:20]
        )
|
|
||||||
|
class OnionHopsDataSingle:  # called HopData in lnd
    """One 65-byte hop entry of the onion's hops_data.

    Wire layout: realm (1) || per_hop (32) || hmac (32).
    Only realm 0 is supported.
    """

    def __init__(self, per_hop: OnionPerHop = None):
        self.realm = 0
        self.per_hop = per_hop
        self.hmac = None  # set during packet construction; all-zero hmac marks the final hop

    def to_bytes(self) -> bytes:
        """Serialize to PER_HOP_FULL_SIZE (65) bytes; a missing hmac is zero-filled."""
        ret = bytes([self.realm])
        ret += self.per_hop.to_bytes()
        ret += self.hmac if self.hmac is not None else bytes(PER_HOP_HMAC_SIZE)
        if len(ret) != PER_HOP_FULL_SIZE:
            raise Exception('unexpected length {}'.format(len(ret)))
        return ret

    @classmethod
    def from_bytes(cls, b: bytes):
        """Parse a 65-byte hop entry; rejects any realm other than 0."""
        if len(b) != PER_HOP_FULL_SIZE:
            raise Exception('unexpected length {}'.format(len(b)))
        ret = OnionHopsDataSingle()
        ret.realm = b[0]
        if ret.realm != 0:
            raise Exception('only realm 0 is supported')
        ret.per_hop = OnionPerHop.from_bytes(b[1:33])
        ret.hmac = b[33:]
        return ret
||||||
|
class OnionPacket:
    """A full bolt-04 onion packet.

    Fixed 1366-byte wire layout:
    version (1) || public_key (33) || hops_data (1300) || hmac (32)
    """

    def __init__(self, public_key: bytes, hops_data: bytes, hmac: bytes):
        self.version = 0
        self.public_key = public_key
        self.hops_data = hops_data  # also called RoutingInfo in bolt-04
        self.hmac = hmac

    def to_bytes(self) -> bytes:
        """Serialize to the 1366-byte wire format."""
        packed = b''.join((
            bytes([self.version]),
            self.public_key,
            self.hops_data,
            self.hmac,
        ))
        if len(packed) != 1366:
            raise Exception('unexpected length {}'.format(len(packed)))
        return packed

    @classmethod
    def from_bytes(cls, b: bytes):
        """Parse the 1366-byte wire format; rejects unknown versions."""
        if len(b) != 1366:
            raise Exception('unexpected length {}'.format(len(b)))
        version = b[0]
        if version != 0:
            raise UnsupportedOnionPacketVersion('version {} is not supported'.format(version))
        return OnionPacket(
            public_key=b[1:34],
            hops_data=b[34:1334],
            hmac=b[1334:]
        )
||||||
|
def get_bolt04_onion_key(key_type: bytes, secret: bytes) -> bytes:
    """Derive a bolt-04 onion key: HMAC-SHA256 keyed by key_type over secret.

    key_type selects the purpose: b'rho' (stream cipher), b'mu' (packet
    HMAC), b'um' (error HMAC), b'ammag' (error obfuscation).
    """
    if key_type not in (b'rho', b'mu', b'um', b'ammag'):
        raise Exception('invalid key_type {}'.format(key_type))
    return hmac.new(key_type, msg=secret, digestmod=hashlib.sha256).digest()
|
||||||
|
def get_shared_secrets_along_route(payment_path_pubkeys: Sequence[bytes],
                                   session_key: bytes) -> Sequence[bytes]:
    """Return the ECDH shared secret for each hop of the path.

    Starting from session_key, the ephemeral key is blinded after each hop
    by multiplying with sha256(ephemeral_pubkey || shared_secret) mod the
    curve order, so every hop sees a different ephemeral pubkey.
    """
    num_hops = len(payment_path_pubkeys)
    hop_shared_secrets = num_hops * [b'']
    ephemeral_key = session_key
    # compute shared key for each hop
    for i in range(0, num_hops):
        hop_shared_secrets[i] = lnbase.get_ecdh(ephemeral_key, payment_path_pubkeys[i])
        ephemeral_pubkey = ecc.ECPrivkey(ephemeral_key).get_public_key_bytes()
        blinding_factor = sha256(ephemeral_pubkey + hop_shared_secrets[i])
        blinding_factor_int = int.from_bytes(blinding_factor, byteorder="big")
        ephemeral_key_int = int.from_bytes(ephemeral_key, byteorder="big")
        # blind the ephemeral key for the next hop
        ephemeral_key_int = ephemeral_key_int * blinding_factor_int % ecc.CURVE_ORDER
        ephemeral_key = ephemeral_key_int.to_bytes(32, byteorder="big")
    return hop_shared_secrets
|
||||||
|
def new_onion_packet(payment_path_pubkeys: Sequence[bytes], session_key: bytes,
                     hops_data: Sequence[OnionHopsDataSingle], associated_data: bytes) -> OnionPacket:
    """Construct a bolt-04 onion packet for the given path.

    NOTE(review): mutates hops_data in place (each hop's hmac is set).
    """
    num_hops = len(payment_path_pubkeys)
    hop_shared_secrets = get_shared_secrets_along_route(payment_path_pubkeys, session_key)

    filler = generate_filler(b'rho', num_hops, PER_HOP_FULL_SIZE, hop_shared_secrets)
    mix_header = bytes(HOPS_DATA_SIZE)
    next_hmac = bytes(PER_HOP_HMAC_SIZE)  # final hop gets the all-zero hmac

    # compute routing info and MAC for each hop; built from the last hop inwards
    for i in range(num_hops-1, -1, -1):
        rho_key = get_bolt04_onion_key(b'rho', hop_shared_secrets[i])
        mu_key = get_bolt04_onion_key(b'mu', hop_shared_secrets[i])
        hops_data[i].hmac = next_hmac
        stream_bytes = generate_cipher_stream(rho_key, NUM_STREAM_BYTES)
        # shift right by one hop, prepend this hop's entry, then obfuscate
        mix_header = mix_header[:-PER_HOP_FULL_SIZE]
        mix_header = hops_data[i].to_bytes() + mix_header
        mix_header = xor_bytes(mix_header, stream_bytes)
        if i == num_hops - 1 and len(filler) != 0:
            # outermost iteration (final hop): replace the tail with the filler
            mix_header = mix_header[:-len(filler)] + filler
        packet = mix_header + associated_data
        next_hmac = hmac.new(mu_key, msg=packet, digestmod=hashlib.sha256).digest()

    return OnionPacket(
        public_key=ecc.ECPrivkey(session_key).get_public_key_bytes(),
        hops_data=mix_header,
        hmac=next_hmac)
|
||||||
|
def generate_filler(key_type: bytes, num_hops: int, hop_size: int,
                    shared_secrets: Sequence[bytes]) -> bytes:
    """Generate the bolt-04 filler: padding that, once the onion is fully
    peeled, accounts for the bytes each intermediate hop's keystream
    would otherwise expose."""
    filler_size = (NUM_MAX_HOPS_IN_PATH + 1) * hop_size
    filler = bytearray(filler_size)

    for i in range(0, num_hops-1):  # -1, as last hop does not obfuscate
        # left-shift by one hop, zero-extend, then apply hop i's keystream
        filler = filler[hop_size:]
        filler += bytearray(hop_size)
        stream_key = get_bolt04_onion_key(key_type, shared_secrets[i])
        stream_bytes = generate_cipher_stream(stream_key, filler_size)
        filler = xor_bytes(filler, stream_bytes)

    # keep only the tail that overlaps the actual hops_data
    return filler[(NUM_MAX_HOPS_IN_PATH-num_hops+2)*hop_size:]
||||||
|
def generate_cipher_stream(stream_key: bytes, num_bytes: int) -> bytes:
    """Return num_bytes of ChaCha20 keystream for stream_key (all-zero nonce),
    obtained by encrypting num_bytes of zeroes."""
    chacha = algorithms.ChaCha20(stream_key, nonce=bytes(16))
    encryptor = Cipher(chacha, mode=None, backend=default_backend()).encryptor()
    return encryptor.update(bytes(num_bytes))
||||||
|
# Result of peeling one onion layer: are_we_final tells whether we are the
# destination; next_packet is the onion to forward when we are not.
ProcessedOnionPacket = namedtuple("ProcessedOnionPacket", ["are_we_final", "hop_data", "next_packet"])
|
||||||
|
# TODO replay protection
def process_onion_packet(onion_packet: OnionPacket, associated_data: bytes,
                         our_onion_private_key: bytes) -> ProcessedOnionPacket:
    """Peel one layer off an onion packet addressed to us.

    Verifies the packet HMAC (raises InvalidOnionMac on mismatch), decrypts
    our hop entry, and returns it together with the onion to forward.
    """
    shared_secret = lnbase.get_ecdh(our_onion_private_key, onion_packet.public_key)

    # check message integrity
    mu_key = get_bolt04_onion_key(b'mu', shared_secret)
    calculated_mac = hmac.new(mu_key, msg=onion_packet.hops_data+associated_data,
                              digestmod=hashlib.sha256).digest()
    if onion_packet.hmac != calculated_mac:
        raise InvalidOnionMac()

    # peel an onion layer off
    rho_key = get_bolt04_onion_key(b'rho', shared_secret)
    stream_bytes = generate_cipher_stream(rho_key, NUM_STREAM_BYTES)
    # zero-pad by one hop before decrypting, so the forwarded onion keeps full length
    padded_header = onion_packet.hops_data + bytes(PER_HOP_FULL_SIZE)
    next_hops_data = xor_bytes(padded_header, stream_bytes)

    # calc next ephemeral key
    blinding_factor = sha256(onion_packet.public_key + shared_secret)
    blinding_factor_int = int.from_bytes(blinding_factor, byteorder="big")
    # NOTE(review): despite the name, this holds an ECPubkey point, not an int
    next_public_key_int = ecc.ECPubkey(onion_packet.public_key) * blinding_factor_int
    next_public_key = next_public_key_int.get_public_key_bytes()

    hop_data = OnionHopsDataSingle.from_bytes(next_hops_data[:PER_HOP_FULL_SIZE])
    next_onion_packet = OnionPacket(
        public_key=next_public_key,
        hops_data=next_hops_data[PER_HOP_FULL_SIZE:],
        hmac=hop_data.hmac
    )
    if hop_data.hmac == bytes(PER_HOP_HMAC_SIZE):
        # we are the destination / exit node
        are_we_final = True
    else:
        # we are an intermediate node; forwarding
        are_we_final = False
    return ProcessedOnionPacket(are_we_final, hop_data, next_onion_packet)
|
||||||
|
# raised when an onion error packet cannot be attributed to any hop of the path
class FailedToDecodeOnionError(Exception): pass
||||||
|
class OnionRoutingFailureMessage:
    """A decoded bolt-04 onion failure: numeric failure code plus raw data."""

    def __init__(self, code: int, data: bytes):
        self.code = code
        self.data = data

    def __repr__(self):
        return repr((self.code, self.data))
|
||||||
|
def _decode_onion_error(error_packet: bytes, payment_path_pubkeys: Sequence[bytes],
                        session_key: bytes) -> (bytes, int):
    """Returns the decoded error bytes, and the index of the sender of the error.

    Strips one layer of ammag obfuscation per hop until the per-hop um HMAC
    verifies; raises FailedToDecodeOnionError if no hop's HMAC matches.
    """
    num_hops = len(payment_path_pubkeys)
    hop_shared_secrets = get_shared_secrets_along_route(payment_path_pubkeys, session_key)
    for i in range(num_hops):
        ammag_key = get_bolt04_onion_key(b'ammag', hop_shared_secrets[i])
        um_key = get_bolt04_onion_key(b'um', hop_shared_secrets[i])
        stream_bytes = generate_cipher_stream(ammag_key, len(error_packet))
        error_packet = xor_bytes(error_packet, stream_bytes)
        # first 32 bytes are the HMAC over the rest of the packet
        hmac_computed = hmac.new(um_key, msg=error_packet[32:], digestmod=hashlib.sha256).digest()
        hmac_found = error_packet[:32]
        if hmac_computed == hmac_found:
            return error_packet, i
    raise FailedToDecodeOnionError()
|
|
||||||
|
def decode_onion_error(error_packet: bytes, payment_path_pubkeys: Sequence[bytes],
                       session_key: bytes) -> (OnionRoutingFailureMessage, int):
    """Returns the failure message, and the index of the sender of the error."""
    # first attribute the error to a hop, then parse the decrypted payload
    decrypted_error, sender_index = _decode_onion_error(error_packet, payment_path_pubkeys, session_key)
    failure_msg = get_failure_msg_from_onion_error(decrypted_error)
    return failure_msg, sender_index
|
|
||||||
|
def get_failure_msg_from_onion_error(decrypted_error_packet: bytes) -> OnionRoutingFailureMessage:
    """Parse a decrypted bolt-04 error packet into an OnionRoutingFailureMessage.

    Packet layout: hmac (32) || failure_len (2) || failure_msg (failure_len) || pad.
    failure_msg itself is failure_code (2) || failure_data.
    """
    # get failure_msg bytes from error packet
    failure_len = int.from_bytes(decrypted_error_packet[32:34], byteorder='big')
    failure_msg = decrypted_error_packet[34:34 + failure_len]
    # create failure message object
    code = int.from_bytes(failure_msg[:2], byteorder='big')
    data = failure_msg[2:]
    return OnionRoutingFailureMessage(code, data)
|
||||||
|
# <----- bolt 04, "onion"
|
||||||
|
|
|
@ -10,7 +10,6 @@ import binascii
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
|
|
||||||
from .lnbase import Peer, H256
|
|
||||||
from .bitcoin import sha256, COIN
|
from .bitcoin import sha256, COIN
|
||||||
from .util import bh2u, bfh
|
from .util import bh2u, bfh
|
||||||
from .constants import set_testnet, set_simnet
|
from .constants import set_testnet, set_simnet
|
||||||
|
@ -20,6 +19,7 @@ from .storage import WalletStorage
|
||||||
from .wallet import Wallet
|
from .wallet import Wallet
|
||||||
from .lnbase import Peer, Outpoint, ChannelConfig, LocalState, RemoteState, Keypair, OnlyPubkeyKeypair, OpenChannel, ChannelConstraints, RevocationStore, aiosafe
|
from .lnbase import Peer, Outpoint, ChannelConfig, LocalState, RemoteState, Keypair, OnlyPubkeyKeypair, OpenChannel, ChannelConstraints, RevocationStore, aiosafe
|
||||||
from .lightning_payencode.lnaddr import lnencode, LnAddr, lndecode
|
from .lightning_payencode.lnaddr import lnencode, LnAddr, lndecode
|
||||||
|
from . import lnrouter
|
||||||
|
|
||||||
|
|
||||||
is_key = lambda k: k.endswith("_basepoint") or k.endswith("_key")
|
is_key = lambda k: k.endswith("_basepoint") or k.endswith("_key")
|
||||||
|
@ -90,9 +90,14 @@ class LNWorker:
|
||||||
def __init__(self, wallet, network):
|
def __init__(self, wallet, network):
|
||||||
self.wallet = wallet
|
self.wallet = wallet
|
||||||
self.network = network
|
self.network = network
|
||||||
self.privkey = H256(b"0123456789")
|
self.privkey = sha256(b"0123456789")
|
||||||
self.config = network.config
|
self.config = network.config
|
||||||
self.peers = {}
|
self.peers = {}
|
||||||
|
# view of the network
|
||||||
|
self.nodes = {} # received node announcements
|
||||||
|
self.channel_db = lnrouter.ChannelDB()
|
||||||
|
self.path_finder = lnrouter.LNPathFinder(self.channel_db)
|
||||||
|
|
||||||
self.channels = wallet.storage.get("channels", {})
|
self.channels = wallet.storage.get("channels", {})
|
||||||
peer_list = network.config.get('lightning_peers', node_list)
|
peer_list = network.config.get('lightning_peers', node_list)
|
||||||
for host, port, pubkey in peer_list:
|
for host, port, pubkey in peer_list:
|
||||||
|
@ -102,7 +107,8 @@ class LNWorker:
|
||||||
self.on_network_update('updated') # shortcut (don't block) if funding tx locked and verified
|
self.on_network_update('updated') # shortcut (don't block) if funding tx locked and verified
|
||||||
|
|
||||||
def add_peer(self, host, port, pubkey):
|
def add_peer(self, host, port, pubkey):
|
||||||
peer = Peer(host, int(port), binascii.unhexlify(pubkey), self.privkey, self.network)
|
peer = Peer(host, int(port), binascii.unhexlify(pubkey), self.privkey,
|
||||||
|
self.network, self.channel_db, self.path_finder)
|
||||||
self.network.futures.append(asyncio.run_coroutine_threadsafe(peer.main_loop(), asyncio.get_event_loop()))
|
self.network.futures.append(asyncio.run_coroutine_threadsafe(peer.main_loop(), asyncio.get_event_loop()))
|
||||||
self.peers[pubkey] = peer
|
self.peers[pubkey] = peer
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ from lib import bitcoin
|
||||||
import ecdsa.ellipticcurve
|
import ecdsa.ellipticcurve
|
||||||
from ecdsa.curves import SECP256k1
|
from ecdsa.curves import SECP256k1
|
||||||
from lib.util import bfh
|
from lib.util import bfh
|
||||||
from lib import bitcoin, lnbase
|
from lib import bitcoin, lnbase, lnrouter
|
||||||
|
|
||||||
funding_tx_id = '8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be'
|
funding_tx_id = '8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be'
|
||||||
funding_output_index = 0
|
funding_output_index = 0
|
||||||
|
@ -254,7 +254,9 @@ class Test_LNBase(unittest.TestCase):
|
||||||
success=True, cltv_timeout=0)
|
success=True, cltv_timeout=0)
|
||||||
|
|
||||||
def test_find_path_for_payment(self):
|
def test_find_path_for_payment(self):
|
||||||
p = Peer('', 0, 'a', bitcoin.sha256('privkeyseed'), None)
|
channel_db = lnrouter.ChannelDB()
|
||||||
|
path_finder = lnrouter.LNPathFinder(channel_db)
|
||||||
|
p = Peer('', 0, 'a', bitcoin.sha256('privkeyseed'), None, channel_db, path_finder)
|
||||||
p.on_channel_announcement({'node_id_1': b'b', 'node_id_2': b'c', 'short_channel_id': bfh('0000000000000001')})
|
p.on_channel_announcement({'node_id_1': b'b', 'node_id_2': b'c', 'short_channel_id': bfh('0000000000000001')})
|
||||||
p.on_channel_announcement({'node_id_1': b'b', 'node_id_2': b'e', 'short_channel_id': bfh('0000000000000002')})
|
p.on_channel_announcement({'node_id_1': b'b', 'node_id_2': b'e', 'short_channel_id': bfh('0000000000000002')})
|
||||||
p.on_channel_announcement({'node_id_1': b'a', 'node_id_2': b'b', 'short_channel_id': bfh('0000000000000003')})
|
p.on_channel_announcement({'node_id_1': b'a', 'node_id_2': b'b', 'short_channel_id': bfh('0000000000000003')})
|
||||||
|
@ -274,7 +276,7 @@ class Test_LNBase(unittest.TestCase):
|
||||||
p.on_channel_update({'short_channel_id': bfh('0000000000000005'), 'flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(999)})
|
p.on_channel_update({'short_channel_id': bfh('0000000000000005'), 'flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(999)})
|
||||||
p.on_channel_update({'short_channel_id': bfh('0000000000000006'), 'flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(99999999)})
|
p.on_channel_update({'short_channel_id': bfh('0000000000000006'), 'flags': b'\x00', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(99999999)})
|
||||||
p.on_channel_update({'short_channel_id': bfh('0000000000000006'), 'flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150)})
|
p.on_channel_update({'short_channel_id': bfh('0000000000000006'), 'flags': b'\x01', 'cltv_expiry_delta': o(10), 'htlc_minimum_msat': o(250), 'fee_base_msat': o(100), 'fee_proportional_millionths': o(150)})
|
||||||
print(p.path_finder.find_path_for_payment('a', 'e', 100000))
|
self.assertNotEqual(None, p.path_finder.find_path_for_payment(b'a', b'e', 100000))
|
||||||
|
|
||||||
def test_key_derivation(self):
|
def test_key_derivation(self):
|
||||||
# BOLT3, Appendix E
|
# BOLT3, Appendix E
|
||||||
|
@ -378,7 +380,7 @@ class Test_LNBase(unittest.TestCase):
|
||||||
self.assertEqual(bfh('0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f28368661954176cd9869da33d713aa219fcef1e5c806fef11e696bcc66844de8271c27974a0fd57c2dbcb2c6dd4e8ef35d96db28d5a0e49b6ab3d6de31af65950723b8cddc108390bebf8d149002e31bdc283056477ba27c8054c248ad7306de31663a7c99ec659da15d0f6fbc7e1687485b39e9be0ec3b70164cb3618a9b546317e7c2d62ae9f0f840704535729262d30c6132d1b390f073edec8fa057176c6268b6ad06a82ff0d16d4c662194873e8b4ecf46eb2c9d4d58d2ee2021adb19840605ac5afd8bd942dd71e8244c83e28b2ed5a3b09e9e7df5c8c747e5765ba366a4f7407a6c6b0a32f74bc5e428f7fa4c3cf70e13ed91563177d94190d5149aa4b9c96d00e40d2ac35ab9c4a621ce0f6f5df7d64a9c8d435db19de192d9db522c7f7b4e201fc1b61a9bd3efd062ae24455d463818b01e2756c7d0691bc3ac4c017be34c9a8b2913bb1b937e31e0ae40f650a7cd820bcb4996825b1cbad1ff7ccc2b513b1104524c34f6573e1b59201c005a632ee5dccd3711a32e3ba1ff00fcffbe636e4b3a84bbe491b836a57ccec138b8cc2ec733846904d872f305d538d51db8e56232ec6e07877075328874cb7b09c7e799100a9ff085dead253886b174fc408a0ea7b48bce2c5d8992285011960af088f7e006ef60089d46ac9aa15acfac6c87c3cf6904764dd785419292fbafa9cca09c8ade24a6cd63f12d1cfc83fa35cf2f1cf503c39cbf78293f06c68a3cece7177169cd872bb49bf69d933a27a887dd9daefa9239fca9f0c3e309ec61d9df947211da98cf11a6e0fb77252629cdf9f2226dd69ca73fa51be4df224592f8d471b69a1aebbdaa2f3a798b3581253d97feb0a12e6606043ca0fc5efc0f49b8061d6796eff31cd8638499e2f25ffb96eec32837438ed7ebebbe587886648f63e35d80f41869f4c308f2e6970bd65fead5e8544e3239a6acc9d996b08d1546455bcafbe88ed3ed547714841946fe2e77180e4d7bf1452414e4b1745a7897184a2c4cbc3ac46f83342a55a48e29dc8f17cf595dd28f51e297ba89fd25ed0dbd1c0081a810beaab09758a36fbfd16fbdc3daa9fe05c8a73195f244ef2743a5df761f01ee6e693eb6c7f1a7834fab3671391e5ddebf611e119a2ae4456e2cee7a6d4f27a2246cdb1f8ef35f0b3d7044b3799d8d0ed0a6470557fd807c065d6d83acba07e96e10770ada8c0b4d4921522944188d5f30086a6ee0a4795331273f32beaaa43363fc58208a257e5c5c434c7325b583642219d81c7d67b908d5263b42ac1991edc69a777da60f38eff138c844af9e549374e8b29b166211bfded24587a29394e33828b784da7e7b62ab7e49ea269
3fcdd17fa96186a5ef11ef1a8adffa50f93a3119e95e6c09014f3e3b0709183fa08a826ced6deb4608b7d986ebbcf99ad58e25451d4d9d38d0059734d8501467b97182cd11e0c07c91ca50f61cc31255a3147ade654976a5989097281892aafd8df595c63bd14f1e03f5955a9398d2dd6368bbcae833ae1cc2df31eb0980b4817dfd130020ffb275743fcc01df40e3ecda1c5988e8e1bde965353b0b1bf34ea05f095000c45b6249618d275905a24d3eb58c600aeab4fb552fbf1ccdb2a5c80ace220310f89829d7e53f78c126037b6d8d500220c7a118d9621b4d6bd5379edd7e24bcf540e87aba6b88862db16fa4ee00b009fda80577be67ab94910fd8a7807dfe4ebe66b8fdcd040aa2dc17ec22639298be56b2a2c9d8940647b75f2f6d81746df16e1cb2f05e23397a8c63baea0803441ff4b7d517ff172980a056726235e2f6af85e8aa9b91ba85f14532272d6170df3166b91169dc09d4f4a251610f57ff0885a93364cfaf650bdf436c89795efed5ca934bc7ffc0a4'),
|
self.assertEqual(bfh('0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f28368661954176cd9869da33d713aa219fcef1e5c806fef11e696bcc66844de8271c27974a0fd57c2dbcb2c6dd4e8ef35d96db28d5a0e49b6ab3d6de31af65950723b8cddc108390bebf8d149002e31bdc283056477ba27c8054c248ad7306de31663a7c99ec659da15d0f6fbc7e1687485b39e9be0ec3b70164cb3618a9b546317e7c2d62ae9f0f840704535729262d30c6132d1b390f073edec8fa057176c6268b6ad06a82ff0d16d4c662194873e8b4ecf46eb2c9d4d58d2ee2021adb19840605ac5afd8bd942dd71e8244c83e28b2ed5a3b09e9e7df5c8c747e5765ba366a4f7407a6c6b0a32f74bc5e428f7fa4c3cf70e13ed91563177d94190d5149aa4b9c96d00e40d2ac35ab9c4a621ce0f6f5df7d64a9c8d435db19de192d9db522c7f7b4e201fc1b61a9bd3efd062ae24455d463818b01e2756c7d0691bc3ac4c017be34c9a8b2913bb1b937e31e0ae40f650a7cd820bcb4996825b1cbad1ff7ccc2b513b1104524c34f6573e1b59201c005a632ee5dccd3711a32e3ba1ff00fcffbe636e4b3a84bbe491b836a57ccec138b8cc2ec733846904d872f305d538d51db8e56232ec6e07877075328874cb7b09c7e799100a9ff085dead253886b174fc408a0ea7b48bce2c5d8992285011960af088f7e006ef60089d46ac9aa15acfac6c87c3cf6904764dd785419292fbafa9cca09c8ade24a6cd63f12d1cfc83fa35cf2f1cf503c39cbf78293f06c68a3cece7177169cd872bb49bf69d933a27a887dd9daefa9239fca9f0c3e309ec61d9df947211da98cf11a6e0fb77252629cdf9f2226dd69ca73fa51be4df224592f8d471b69a1aebbdaa2f3a798b3581253d97feb0a12e6606043ca0fc5efc0f49b8061d6796eff31cd8638499e2f25ffb96eec32837438ed7ebebbe587886648f63e35d80f41869f4c308f2e6970bd65fead5e8544e3239a6acc9d996b08d1546455bcafbe88ed3ed547714841946fe2e77180e4d7bf1452414e4b1745a7897184a2c4cbc3ac46f83342a55a48e29dc8f17cf595dd28f51e297ba89fd25ed0dbd1c0081a810beaab09758a36fbfd16fbdc3daa9fe05c8a73195f244ef2743a5df761f01ee6e693eb6c7f1a7834fab3671391e5ddebf611e119a2ae4456e2cee7a6d4f27a2246cdb1f8ef35f0b3d7044b3799d8d0ed0a6470557fd807c065d6d83acba07e96e10770ada8c0b4d4921522944188d5f30086a6ee0a4795331273f32beaaa43363fc58208a257e5c5c434c7325b583642219d81c7d67b908d5263b42ac1991edc69a777da60f38eff138c844af9e549374e8b29b166211bfded24587a29394e33828b784da7e7b62ab7e49ea269
3fcdd17fa96186a5ef11ef1a8adffa50f93a3119e95e6c09014f3e3b0709183fa08a826ced6deb4608b7d986ebbcf99ad58e25451d4d9d38d0059734d8501467b97182cd11e0c07c91ca50f61cc31255a3147ade654976a5989097281892aafd8df595c63bd14f1e03f5955a9398d2dd6368bbcae833ae1cc2df31eb0980b4817dfd130020ffb275743fcc01df40e3ecda1c5988e8e1bde965353b0b1bf34ea05f095000c45b6249618d275905a24d3eb58c600aeab4fb552fbf1ccdb2a5c80ace220310f89829d7e53f78c126037b6d8d500220c7a118d9621b4d6bd5379edd7e24bcf540e87aba6b88862db16fa4ee00b009fda80577be67ab94910fd8a7807dfe4ebe66b8fdcd040aa2dc17ec22639298be56b2a2c9d8940647b75f2f6d81746df16e1cb2f05e23397a8c63baea0803441ff4b7d517ff172980a056726235e2f6af85e8aa9b91ba85f14532272d6170df3166b91169dc09d4f4a251610f57ff0885a93364cfaf650bdf436c89795efed5ca934bc7ffc0a4'),
|
||||||
packet.to_bytes())
|
packet.to_bytes())
|
||||||
for i, privkey in enumerate(payment_path_privkeys):
|
for i, privkey in enumerate(payment_path_privkeys):
|
||||||
processed_packet = lnbase.process_onion_packet(packet, associated_data, privkey)
|
processed_packet = lnrouter.process_onion_packet(packet, associated_data, privkey)
|
||||||
self.assertEqual(hops_data[i].per_hop.to_bytes(), processed_packet.hop_data.per_hop.to_bytes())
|
self.assertEqual(hops_data[i].per_hop.to_bytes(), processed_packet.hop_data.per_hop.to_bytes())
|
||||||
packet = processed_packet.next_packet
|
packet = processed_packet.next_packet
|
||||||
|
|
||||||
|
@ -393,7 +395,7 @@ class Test_LNBase(unittest.TestCase):
|
||||||
]
|
]
|
||||||
session_key = bfh('4141414141414141414141414141414141414141414141414141414141414141')
|
session_key = bfh('4141414141414141414141414141414141414141414141414141414141414141')
|
||||||
error_packet_for_node_0 = bfh('9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d')
|
error_packet_for_node_0 = bfh('9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d')
|
||||||
decoded_error, index_of_sender = lnbase._decode_onion_error(error_packet_for_node_0, payment_path_pubkeys, session_key)
|
decoded_error, index_of_sender = lnrouter._decode_onion_error(error_packet_for_node_0, payment_path_pubkeys, session_key)
|
||||||
self.assertEqual(bfh('4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
|
self.assertEqual(bfh('4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
|
||||||
decoded_error)
|
decoded_error)
|
||||||
self.assertEqual(4, index_of_sender)
|
self.assertEqual(4, index_of_sender)
|
||||||
|
|
Loading…
Add table
Reference in a new issue