2019-08-30 12:42:39 +02:00
|
|
|
"""
|
2020-01-06 12:44:47 +01:00
|
|
|
Bitmessage Protocol
|
2019-08-30 12:42:39 +02:00
|
|
|
"""
|
2020-01-06 12:44:47 +01:00
|
|
|
# pylint: disable=attribute-defined-outside-init, too-few-public-methods
|
2017-05-24 16:51:49 +02:00
|
|
|
import base64
|
2017-03-11 11:12:08 +01:00
|
|
|
import hashlib
|
2019-08-06 13:04:33 +02:00
|
|
|
import logging
|
2017-03-11 11:12:08 +01:00
|
|
|
import socket
|
2017-05-24 16:51:49 +02:00
|
|
|
import struct
|
2017-10-19 09:08:05 +02:00
|
|
|
import time
|
2018-10-12 23:12:00 +02:00
|
|
|
from binascii import hexlify
|
2017-05-24 16:51:49 +02:00
|
|
|
|
2018-07-17 13:28:56 +02:00
|
|
|
import addresses
|
|
|
|
import connectionpool
|
|
|
|
import knownnodes
|
|
|
|
import protocol
|
|
|
|
import state
|
2017-06-24 12:22:41 +02:00
|
|
|
from bmconfigparser import BMConfigParser
|
2017-05-24 16:51:49 +02:00
|
|
|
from inventory import Inventory
|
2017-03-11 11:12:08 +01:00
|
|
|
from network.advanceddispatcher import AdvancedDispatcher
|
2018-07-17 13:28:56 +02:00
|
|
|
from network.bmobject import (
|
2020-01-24 15:16:05 +01:00
|
|
|
BMObject, BMObjectAlreadyHaveError, BMObjectExpiredError,
|
|
|
|
BMObjectInsufficientPOWError, BMObjectInvalidDataError,
|
|
|
|
BMObjectInvalidError, BMObjectUnwantedStreamError
|
2020-01-06 12:44:47 +01:00
|
|
|
)
|
|
|
|
from network.constants import (
|
|
|
|
ADDRESS_ALIVE, MAX_MESSAGE_SIZE, MAX_OBJECT_COUNT,
|
|
|
|
MAX_OBJECT_PAYLOAD_SIZE, MAX_TIME_OFFSET
|
|
|
|
)
|
|
|
|
from network.dandelion import Dandelion
|
2018-12-10 13:33:07 +01:00
|
|
|
from network.proxy import ProxyError
|
2019-11-03 16:11:52 +01:00
|
|
|
from node import Node, Peer
|
2020-01-24 15:16:05 +01:00
|
|
|
from objectracker import ObjectTracker, missingObjects
|
2020-02-05 13:34:45 +01:00
|
|
|
from queues import invQueue, objectProcessorQueue, portCheckerQueue
|
2018-07-17 13:28:56 +02:00
|
|
|
from randomtrackingdict import RandomTrackingDict
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2019-08-06 13:04:33 +02:00
|
|
|
logger = logging.getLogger('default')
|
|
|
|
|
2019-02-04 11:16:10 +01:00
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoError(ProxyError):
    """Base class for all Bitmessage protocol errors."""
    # NOTE(review): the parentheses below do NOT make a tuple — this is
    # the plain string "Protocol error". A trailing comma would be
    # required for a one-element tuple; kept as a string to preserve
    # existing behavior.
    errorCodes = "Protocol error"
|
2017-03-11 11:12:08 +01:00
|
|
|
|
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoInsufficientDataError(BMProtoError):
    """Raised when a packet does not contain enough data to decode."""
    errorCodes = "Insufficient data"
|
2017-05-24 16:51:49 +02:00
|
|
|
|
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoExcessiveDataError(BMProtoError):
    """Raised when a packet contains more data than expected/allowed."""
    errorCodes = "Too much data"
|
2017-05-24 16:51:49 +02:00
|
|
|
|
|
|
|
|
2017-05-27 19:09:21 +02:00
|
|
|
class BMProto(AdvancedDispatcher, ObjectTracker):
    """A parser for the Bitmessage Protocol"""
    # pylint: disable=too-many-instance-attributes, too-many-public-methods

    # Class-level counter, incremented in peerValidityChecks() whenever a
    # peer is rejected for an excessive time offset, reset to 0 otherwise.
    timeOffsetWrongCount = 0

    def __init__(self, address=None, sock=None):
        # pylint: disable=unused-argument, super-init-not-called
        # Only AdvancedDispatcher is initialised here; ObjectTracker's
        # __init__ is deliberately not called (see super-init-not-called
        # suppression) — subclasses such as BMStringParser call it
        # explicitly when needed.
        AdvancedDispatcher.__init__(self, sock)
        self.isOutbound = False
        # packet/connection from a local IP
        self.local = False
        # objects the peer requested from us, keyed by inventory hash
        self.pendingUpload = RandomTrackingDict()
        # canonical identifier of network group
        self.network_group = None
|
2017-03-20 18:32:26 +01:00
|
|
|
|
|
|
|
def bm_proto_reset(self):
    """Return the protocol parser to its initial, pre-header state."""
    # Header fields parsed in state_bm_header()
    self.magic = None
    self.command = None
    self.checksum = None
    # Payload bookkeeping used by the decode_payload_* helpers
    self.payload = None
    self.payloadLength = 0
    self.payloadOffset = 0
    # Parser status flags and the current object (if any)
    self.invalid = False
    self.object = None
    # The next read must deliver a complete protocol header
    self.expectBytes = protocol.Header.size
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def state_bm_header(self):
    """Process incoming header"""
    # Unpack the fixed-size message header: magic, command name,
    # payload length and the first 4 bytes of the payload's SHA-512.
    self.magic, self.command, self.payloadLength, self.checksum = \
        protocol.Header.unpack(self.read_buf[:protocol.Header.size])
    # Command field is NUL-padded to its fixed width
    self.command = self.command.rstrip('\x00')
    if self.magic != 0xE9BEB4D9:
        # skip 1 byte in order to sync
        self.set_state("bm_header", length=1)
        self.bm_proto_reset()
        logger.debug('Bad magic')
        # For stream (TCP) connections a bad magic is fatal; datagram
        # transports just resynchronise.
        if self.socket.type == socket.SOCK_STREAM:
            self.close_reason = "Bad magic"
            self.set_state("close")
        return False
    if self.payloadLength > MAX_MESSAGE_SIZE:
        # Oversized message: still consume it, but mark it invalid so
        # state_bm_command() can reject it.
        self.invalid = True
    self.set_state(
        "bm_command",
        length=protocol.Header.size, expectBytes=self.payloadLength)
    return True
|
2018-07-17 13:28:56 +02:00
|
|
|
|
2019-08-30 12:42:39 +02:00
|
|
|
def state_bm_command(self):  # pylint: disable=too-many-branches
    """Process incoming command"""
    # Verify payload integrity against the checksum from the header.
    self.payload = self.read_buf[:self.payloadLength]
    if self.checksum != hashlib.sha512(self.payload).digest()[0:4]:
        logger.debug('Bad checksum, ignoring')
        self.invalid = True
    retval = True
    # Before the handshake completes only error/version/verack are
    # acceptable commands.
    if not self.fullyEstablished and self.command not in (
            "error", "version", "verack"):
        logger.error(
            'Received command %s before connection was fully'
            ' established, ignoring', self.command)
        self.invalid = True
    if not self.invalid:
        # Dispatch to the matching bm_command_<name> handler; a missing
        # handler (AttributeError) just means the command is
        # unimplemented.  Protocol/object errors are logged and the
        # packet skipped rather than closing the connection.
        try:
            retval = getattr(
                self, "bm_command_" + str(self.command).lower())()
        except AttributeError:
            # unimplemented command
            logger.debug('unimplemented command %s', self.command)
        except BMProtoInsufficientDataError:
            logger.debug('packet length too short, skipping')
        except BMProtoExcessiveDataError:
            logger.debug('too much data, skipping')
        except BMObjectInsufficientPOWError:
            logger.debug('insufficient PoW, skipping')
        except BMObjectInvalidDataError:
            logger.debug('object invalid data, skipping')
        except BMObjectExpiredError:
            logger.debug('object expired, skipping')
        except BMObjectUnwantedStreamError:
            logger.debug('object not in wanted stream, skipping')
        except BMObjectInvalidError:
            logger.debug('object invalid, skipping')
        except BMObjectAlreadyHaveError:
            logger.debug(
                '%(host)s:%(port)i already got object, skipping',
                self.destination._asdict())
        except struct.error:
            logger.debug('decoding error, skipping')
    elif self.socket.type == socket.SOCK_DGRAM:
        # broken read, ignore
        pass
    else:
        # Invalid command on a stream connection is fatal.
        logger.debug('Closing due to invalid command %s', self.command)
        self.close_reason = "Invalid command %s" % self.command
        self.set_state("close")
        return False
    if retval:
        # Handler finished with this packet: wait for the next header.
        self.set_state("bm_header", length=self.payloadLength)
        self.bm_proto_reset()
    # else assume the command requires a different state to follow
    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-04-16 18:27:15 +02:00
|
|
|
def decode_payload_string(self, length):
    """Consume and return the next `length` bytes of the payload."""
    start = self.payloadOffset
    self.payloadOffset = start + length
    return self.payload[start:start + length]
|
|
|
|
|
|
|
|
def decode_payload_varint(self):
    """Decode and consume one varint at the current payload offset."""
    decoded, consumed = addresses.decodeVarint(
        self.payload[self.payloadOffset:])
    self.payloadOffset += consumed
    return decoded
|
|
|
|
|
|
|
|
def decode_payload_node(self):
    """Decode node details from the payload"""
    # protocol.checkIPAddress()
    # services (uint64), 16-byte address, port (uint16)
    services, host, port = self.decode_payload_content("Q16sH")
    if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
        # IPv4-mapped IPv6 (::ffff:a.b.c.d) — render the IPv4 part
        host = socket.inet_ntop(socket.AF_INET, str(host[12:16]))
    elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43':
        # Onion, based on BMD/bitcoind
        host = base64.b32encode(host[6:]).lower() + ".onion"
    else:
        host = socket.inet_ntop(socket.AF_INET6, str(host))
    if host == "":
        # This can happen on Windows systems which are not 64-bit
        # compatible so let us drop the IPv6 address.
        host = socket.inet_ntop(socket.AF_INET, str(host[12:16]))

    return Node(services, host, port)
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2020-01-06 12:44:47 +01:00
|
|
|
# pylint: disable=too-many-branches, too-many-statements
def decode_payload_content(self, pattern="v"):
    """
    Decode the payload depending on pattern:

    L = varint indicating the length of the next array
    l = varint indicating the length of the next item
    v = varint (or array)
    H = uint16
    I = uint32
    Q = uint64
    i = net_addr (without time and stream number)
    s = string
    0-9 = length of the next item
    , = end of array
    """

    # pylint: disable=inconsistent-return-statements
    def decode_simple(self, char="v"):
        """Decode the payload using one char pattern"""
        # Returns None for unknown chars (hence the pylint suppression).
        if char == "v":
            return self.decode_payload_varint()
        if char == "i":
            return self.decode_payload_node()
        if char == "H":
            self.payloadOffset += 2
            return struct.unpack(">H", self.payload[
                self.payloadOffset - 2:self.payloadOffset])[0]
        if char == "I":
            self.payloadOffset += 4
            return struct.unpack(">I", self.payload[
                self.payloadOffset - 4:self.payloadOffset])[0]
        if char == "Q":
            self.payloadOffset += 8
            return struct.unpack(">Q", self.payload[
                self.payloadOffset - 8:self.payloadOffset])[0]

    size = None
    isArray = False

    # Each parserStack frame is a 6-element list:
    # [0] size
    # [1] iterator starting from size counting to 0
    # [2] isArray?
    # [3] subpattern
    # [4] position of parser in subpattern
    # [5] retval (array)
    parserStack = [[1, 1, False, pattern, 0, []]]

    while True:
        # current pattern character of the innermost frame
        i = parserStack[-1][3][parserStack[-1][4]]
        if i in "0123456789" and (
                size is None or parserStack[-1][3][parserStack[-1][4] - 1]
                not in "lL"):
            # accumulate a literal decimal length (e.g. the 32 in "32s")
            try:
                size = size * 10 + int(i)
            except TypeError:
                # size was None — first digit
                size = int(i)
            isArray = False
        elif i in "Ll" and size is None:
            # length comes from a varint in the payload itself
            size = self.decode_payload_varint()
            isArray = i == "L"
        elif size is not None:
            # a pending size applies to the upcoming item/array:
            # push a new frame for it
            if isArray:
                parserStack.append([
                    size, size, isArray,
                    parserStack[-1][3][parserStack[-1][4]:], 0, []
                ])
                # mark the parent subpattern as fully consumed
                parserStack[-2][4] = len(parserStack[-2][3])
            else:
                # find the end of the sized sub-item in the pattern
                for j in range(parserStack[-1][4], len(parserStack[-1][3])):
                    if parserStack[-1][3][j] not in "lL0123456789":
                        break
                # pylint: disable=undefined-loop-variable
                parserStack.append([
                    size, size, isArray,
                    parserStack[-1][3][parserStack[-1][4]:j + 1], 0, []
                ])
                parserStack[-2][4] += len(parserStack[-1][3]) - 1
            size = None
            continue
        elif i == "s":
            # if parserStack[-2][2]:
            #     parserStack[-1][5].append(self.payload[
            #         self.payloadOffset:self.payloadOffset + parserStack[-1][0]])
            # else:
            # raw string of the frame's size; the frame's retval IS the
            # string (not a list)
            parserStack[-1][5] = self.payload[
                self.payloadOffset:self.payloadOffset + parserStack[-1][0]]
            self.payloadOffset += parserStack[-1][0]
            parserStack[-1][1] = 0
            parserStack[-1][2] = True
            # del parserStack[-1]
            size = None
        elif i in "viHIQ":
            # simple scalar: decode and append to the current frame
            parserStack[-1][5].append(decode_simple(
                self, parserStack[-1][3][parserStack[-1][4]]))
            size = None
        else:
            # unknown pattern char — skip it
            size = None
        # advance position; pop/collapse completed frames from the
        # innermost outwards
        for depth in range(len(parserStack) - 1, -1, -1):
            parserStack[depth][4] += 1
            if parserStack[depth][4] >= len(parserStack[depth][3]):
                # subpattern exhausted: one iteration done
                parserStack[depth][1] -= 1
                parserStack[depth][4] = 0
                if depth > 0:
                    # merge results into the parent frame: arrays are
                    # appended as one element, plain groups extended
                    if parserStack[depth][2]:
                        parserStack[depth - 1][5].append(
                            parserStack[depth][5])
                    else:
                        parserStack[depth - 1][5].extend(
                            parserStack[depth][5])
                    parserStack[depth][5] = []
                if parserStack[depth][1] <= 0:
                    if depth == 0:
                        # we're done, at depth 0 counter is at 0
                        # and pattern is done parsing
                        return parserStack[depth][5]
                    del parserStack[-1]
                    continue
                break
            break
    if self.payloadOffset > self.payloadLength:
        logger.debug(
            'Insufficient data %i/%i',
            self.payloadOffset, self.payloadLength)
        raise BMProtoInsufficientDataError()
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_error(self):
    """Decode an incoming error message and write it to the log."""
    # fields: fatal status, ban time, inventory vector, error text
    fields = self.decode_payload_content("vvlsls")
    fatal_status = fields[0]
    # fields[1] (ban time) and fields[2] (inventory vector) are unused
    error_text = fields[3]
    logger.error(
        '%s:%i error: %i, %s', self.destination.host,
        self.destination.port, fatal_status, error_text)
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_getdata(self):
    """
    Incoming request for object(s).

    If we have them and some other conditions are fulfilled,
    append them to the write queue.
    """
    requested = self.decode_payload_content("l32s")
    timestamp = time.time()
    # skip while an upload throttle is in effect
    if timestamp < self.skipUntil:
        return True
    for inv_hash in requested:
        self.pendingUpload[str(inv_hash)] = timestamp
    return True
|
|
|
|
|
2018-02-03 11:46:39 +01:00
|
|
|
def _command_inv(self, dandelion=False):
    """Common implementation for inv and dinv announcements."""
    items = self.decode_payload_content("l32s")

    if len(items) > MAX_OBJECT_COUNT:
        logger.error(
            'Too many items in %sinv message!', 'd' if dandelion else '')
        raise BMProtoExcessiveDataError()

    # ignore dinv if dandelion turned off
    if dandelion and not state.dandelion:
        return True

    for i in map(str, items):
        # Skip hashes we already have, unless they are still in the
        # dandelion stem phase.
        if i in Inventory() and not Dandelion().hasHash(i):
            continue
        # Record new stem-phase hashes against this connection.
        if dandelion and not Dandelion().hasHash(i):
            Dandelion().addHash(i, self)
        self.handleReceivedInventory(i)

    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2018-02-03 11:46:39 +01:00
|
|
|
def bm_command_inv(self):
    """Handle a regular (non-dandelion) inventory announcement."""
    return self._command_inv(dandelion=False)
|
|
|
|
|
2017-09-25 01:17:04 +02:00
|
|
|
def bm_command_dinv(self):
    """Handle a dandelion stem-phase inventory announcement."""
    return self._command_inv(dandelion=True)
|
2017-09-25 01:17:04 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_object(self):
    """Incoming object, process it"""
    # remember where the raw object data starts so it can be stored
    # verbatim in the inventory below
    objectOffset = self.payloadOffset
    nonce, expiresTime, objectType, version, streamNumber = \
        self.decode_payload_content("QQIvv")
    self.object = BMObject(
        nonce, expiresTime, objectType, version, streamNumber,
        self.payload, self.payloadOffset)

    if len(self.payload) - self.payloadOffset > MAX_OBJECT_PAYLOAD_SIZE:
        logger.info(
            'The payload length of this object is too large (%d bytes).'
            ' Ignoring it.', len(self.payload) - self.payloadOffset)
        raise BMProtoExcessiveDataError()

    # Basic sanity checks; on failure, stop asking other peers for this
    # object and re-raise for state_bm_command() to log.
    try:
        self.object.checkProofOfWorkSufficient()
        self.object.checkEOLSanity()
        self.object.checkAlreadyHave()
    except (BMObjectExpiredError, BMObjectAlreadyHaveError,
            BMObjectInsufficientPOWError):
        BMProto.stopDownloadingObject(self.object.inventoryHash)
        raise
    # Stream check may be tolerated when "acceptmismatch" is configured.
    try:
        self.object.checkStream()
    except BMObjectUnwantedStreamError:
        acceptmismatch = BMConfigParser().get(
            "inventory", "acceptmismatch")
        BMProto.stopDownloadingObject(
            self.object.inventoryHash, acceptmismatch)
        if not acceptmismatch:
            raise

    # Type-specific validation; valid objects go to the object
    # processor queue (buffer() is Python 2).
    try:
        self.object.checkObjectByType()
        objectProcessorQueue.put((
            self.object.objectType, buffer(self.object.data)))
    except BMObjectInvalidError:
        BMProto.stopDownloadingObject(self.object.inventoryHash, True)
    else:
        try:
            del missingObjects[self.object.inventoryHash]
        except KeyError:
            pass

    # If a stem-phase object comes back to us it has completed a cycle.
    if self.object.inventoryHash in Inventory() and Dandelion().hasHash(
            self.object.inventoryHash):
        Dandelion().removeHash(
            self.object.inventoryHash, "cycle detection")

    # Store the raw object and propagate the announcement.
    Inventory()[self.object.inventoryHash] = (
        self.object.objectType, self.object.streamNumber,
        buffer(self.payload[objectOffset:]), self.object.expiresTime,
        buffer(self.object.tag)
    )
    self.handleReceivedObject(
        self.object.streamNumber, self.object.inventoryHash)
    invQueue.put((
        self.object.streamNumber, self.object.inventoryHash,
        self.destination))
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-05-27 19:09:21 +02:00
|
|
|
def _decode_addr(self):
    """Decode an addr payload: an array of peer records, each being
    time (Q), stream (I), services (Q), 16-byte IP (16s), port (H)."""
    return self.decode_payload_content("LQIQ16sH")
|
2017-05-27 19:09:21 +02:00
|
|
|
|
2017-04-16 18:27:15 +02:00
|
|
|
def bm_command_addr(self):
    """Incoming addresses, process them"""
    # pylint: disable=redefined-outer-name
    addresses = self._decode_addr()
    for seenTime, stream, _, ip, port in addresses:
        decodedIP = protocol.checkIPAddress(str(ip))
        # only track peers in streams we participate in
        if stream not in state.streamsInWhichIAmParticipating:
            continue
        # accept only plausible entries: decodable IP, seenTime in the
        # past but not older than ADDRESS_ALIVE, and a non-zero port
        if (
            decodedIP
            and time.time() - seenTime > 0
            and seenTime > time.time() - ADDRESS_ALIVE
            and port > 0
        ):
            peer = Peer(decodedIP, port)

            with knownnodes.knownNodesLock:
                # isnew =
                knownnodes.addKnownNode(stream, peer, seenTime)

            # since we don't track peers outside of knownnodes,
            # only spread if in knownnodes to prevent flood
            # DISABLED TO WORKAROUND FLOOD/LEAK
            # if isnew:
            #     addrQueue.put((
            #         stream, peer, seenTime, self.destination))
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-05-25 14:59:18 +02:00
|
|
|
def bm_command_portcheck(self):
    """Incoming port check request, queue it."""
    # NOTE(review): self.destination is itself a Peer namedtuple, so it
    # is passed here as the *host* argument of a new Peer — looks
    # suspicious; confirm whether self.destination.host was intended.
    portCheckerQueue.put(Peer(self.destination, self.peerNode.port))
    return True
|
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_ping(self):
    """Answer an incoming ping by queueing a pong packet."""
    pong = protocol.CreatePacket('pong')
    self.append_write_buf(pong)
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2019-08-30 12:42:39 +02:00
|
|
|
def bm_command_pong(self):  # pylint: disable=no-self-use
    """
    Handle an incoming pong by doing nothing.

    PyBitmessage pings connections after about 5 minutes of
    inactivity and leaves actual timeout handling to the TCP stack,
    so a pong carries no information we need to act on.
    """
    return True
|
2017-04-04 10:46:01 +02:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def bm_command_verack(self):
    """
    Incoming verack.

    If we have already sent our own verack, the handshake is complete
    (apart from flushing buffers), so move on to TLS setup or the
    fully-established phase. Otherwise keep processing commands.
    """
    self.verackReceived = True
    if self.verackSent:
        # both sides acknowledged — advance the connection state
        next_state = (
            "tls_init" if self.isSSL else "connection_fully_established")
        self.set_state(
            next_state, length=self.payloadLength, expectBytes=0)
        return False
    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def bm_command_version(self):
    """
    Incoming version.

    Parse and log, remember important things, like streams, bitfields, etc.
    """
    decoded = self.decode_payload_content("IQQiiQlslv")
    # first seven fields are fixed; the remainder is the stream list
    (self.remoteProtocolVersion, self.services, self.timestamp,
     self.sockNode, self.peerNode, self.nonce, self.userAgent
     ) = decoded[:7]
    self.streams = decoded[7:]
    # keep the nonce in its 8-byte wire form for later comparison
    self.nonce = struct.pack('>Q', self.nonce)
    self.timeOffset = self.timestamp - int(time.time())
    logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion)
    logger.debug('services: 0x%08X', self.services)
    logger.debug('time offset: %i', self.timeOffset)
    logger.debug('my external IP: %s', self.sockNode.host)
    logger.debug(
        'remote node incoming address: %s:%i',
        self.destination.host, self.peerNode.port)
    logger.debug('user agent: %s', self.userAgent)
    logger.debug('streams: [%s]', ','.join(map(str, self.streams)))
    if not self.peerValidityChecks():
        # ABORT afterwards
        return True
    self.append_write_buf(protocol.CreatePacket('verack'))
    self.verackSent = True
    # inbound connections must reply with their own version message
    if not self.isOutbound:
        self.append_write_buf(protocol.assembleVersionMessage(
            self.destination.host, self.destination.port,
            connectionpool.BMConnectionPool().streams, True,
            nodeid=self.nodeid))
        logger.debug(
            '%(host)s:%(port)i sending version',
            self.destination._asdict())
    # switch to SSL if both sides advertise and support it
    if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL)
            and protocol.haveSSL(not self.isOutbound)):
        self.isSSL = True
    if not self.verackReceived:
        return True
    self.set_state(
        "tls_init" if self.isSSL else "connection_fully_established",
        length=self.payloadLength, expectBytes=0)
    return False
|
2017-04-04 10:46:01 +02:00
|
|
|
|
2020-01-06 12:44:47 +01:00
|
|
|
# pylint: disable=too-many-return-statements
def peerValidityChecks(self):
    """
    Check the validity of the peer after its version message.

    Returns True when the peer is acceptable. Otherwise queues an
    error packet for the peer and returns False so the handshake
    can be aborted.
    """
    if self.remoteProtocolVersion < 3:
        self.append_write_buf(protocol.assembleErrorMessage(
            # fixed grammar: previously read "Your is using an old
            # protocol. Closing connection."
            errorText="You are using an old protocol."
            " Closing connection.",
            fatal=2))
        logger.debug(
            'Closing connection to old protocol version %s, node: %s',
            self.remoteProtocolVersion, self.destination)
        return False
    if self.timeOffset > MAX_TIME_OFFSET:
        self.append_write_buf(protocol.assembleErrorMessage(
            errorText="Your time is too far in the future"
            " compared to mine. Closing connection.", fatal=2))
        logger.info(
            "%s's time is too far in the future (%s seconds)."
            " Closing connection to it.", self.destination, self.timeOffset)
        BMProto.timeOffsetWrongCount += 1
        return False
    elif self.timeOffset < -MAX_TIME_OFFSET:
        self.append_write_buf(protocol.assembleErrorMessage(
            errorText="Your time is too far in the past compared to mine."
            " Closing connection.", fatal=2))
        logger.info(
            "%s's time is too far in the past (timeOffset %s seconds)."
            " Closing connection to it.", self.destination, self.timeOffset)
        BMProto.timeOffsetWrongCount += 1
        return False
    else:
        # class-wide counter resets on the first good peer
        BMProto.timeOffsetWrongCount = 0
    if not self.streams:
        self.append_write_buf(protocol.assembleErrorMessage(
            errorText="We don't have shared stream interests."
            " Closing connection.", fatal=2))
        logger.debug(
            'Closed connection to %s because there is no overlapping'
            ' interest in streams.', self.destination)
        return False
    if self.destination in connectionpool.BMConnectionPool().inboundConnections:
        try:
            if not protocol.checkSocksIP(self.destination.host):
                self.append_write_buf(protocol.assembleErrorMessage(
                    errorText="Too many connections from your IP."
                    " Closing connection.", fatal=2))
                logger.debug(
                    'Closed connection to %s because we are already'
                    ' connected to that IP.', self.destination)
                return False
        except Exception:
            # best-effort check: any failure here must not kill the
            # handshake
            pass
    if not self.isOutbound:
        # incoming from a peer we're connected to as outbound,
        # or server full report the same error to counter deanonymisation
        if (
            Peer(self.destination.host, self.peerNode.port)
            in connectionpool.BMConnectionPool().inboundConnections
            or len(connectionpool.BMConnectionPool())
            > BMConfigParser().safeGetInt(
                'bitmessagesettings', 'maxtotalconnections')
            + BMConfigParser().safeGetInt(
                'bitmessagesettings', 'maxbootstrapconnections')
        ):
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="Server full, please try again later.", fatal=2))
            logger.debug(
                'Closed connection to %s due to server full'
                ' or duplicate inbound/outbound.', self.destination)
            return False
    # nonce match means we connected to ourselves
    if connectionpool.BMConnectionPool().isAlreadyConnected(
            self.nonce):
        self.append_write_buf(protocol.assembleErrorMessage(
            errorText="I'm connected to myself. Closing connection.",
            fatal=2))
        logger.debug(
            "Closed connection to %s because I'm connected to myself.",
            self.destination)
        return False

    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-07-05 09:25:49 +02:00
|
|
|
@staticmethod
def stopDownloadingObject(hashId, forwardAnyway=False):
    """Stop downloading an object"""
    # Drop the hash from every connection's download queue; when
    # forwardAnyway is False also drop it from their upload queues.
    for connection in connectionpool.BMConnectionPool().connections():
        try:
            del connection.objectsNewToMe[hashId]
        except KeyError:
            pass
        if not forwardAnyway:
            try:
                with connection.objectsNewToThemLock:
                    del connection.objectsNewToThem[hashId]
            except KeyError:
                pass
    # Forget the global "missing" record for this hash, if any.
    try:
        del missingObjects[hashId]
    except KeyError:
        pass
|
2017-07-05 09:25:49 +02:00
|
|
|
|
2017-10-19 09:08:05 +02:00
|
|
|
def handle_close(self):
    """Handle close"""
    self.set_state("close")
    if not (self.accepting or self.connecting or self.connected):
        # already disconnected
        return
    # Log with decreasing detail depending on which attributes still
    # exist (close_reason/destination may be unset at this point).
    try:
        logger.debug(
            '%s:%i: closing, %s', self.destination.host,
            self.destination.port, self.close_reason)
    except AttributeError:
        try:
            logger.debug(
                '%(host)s:%(port)i: closing', self.destination._asdict())
        except AttributeError:
            logger.debug('Disconnected socket closing')
    AdvancedDispatcher.handle_close(self)
|
2018-10-12 23:12:00 +02:00
|
|
|
|
|
|
|
|
|
|
|
class BMStringParser(BMProto):
    """
    A special case of BMProto used by objectProcessor to send ACK
    """
    def __init__(self):
        super(BMStringParser, self).__init__()
        # placeholder destination; no real socket is attached
        self.destination = Peer('127.0.0.1', 8444)
        self.payload = None
        # BMProto.__init__ deliberately skips ObjectTracker setup,
        # so do it here explicitly
        ObjectTracker.__init__(self)

    def send_data(self, data):
        """Send object given by the data string"""
        # This class is introduced specially for ACK sending, please
        # change log strings if you are going to use it for something else
        self.bm_proto_reset()
        self.payload = data
        try:
            self.bm_command_object()
        except BMObjectAlreadyHaveError:
            pass  # maybe the same msg received on different nodes
        except BMObjectExpiredError:
            logger.debug(
                'Sending ACK failure (expired): %s', hexlify(data))
        except Exception as e:
            logger.debug(
                'Exception of type %s while sending ACK',
                type(e), exc_info=True)
|