2017-05-24 16:51:49 +02:00
|
|
|
import base64
|
2017-03-11 11:12:08 +01:00
|
|
|
import hashlib
|
2017-09-25 01:17:04 +02:00
|
|
|
import random
|
2017-03-11 11:12:08 +01:00
|
|
|
import socket
|
2017-05-24 16:51:49 +02:00
|
|
|
import struct
|
2017-10-19 09:08:05 +02:00
|
|
|
import time
|
2017-05-24 16:51:49 +02:00
|
|
|
|
2017-06-24 12:22:41 +02:00
|
|
|
from bmconfigparser import BMConfigParser
|
2017-05-24 16:51:49 +02:00
|
|
|
from debug import logger
|
|
|
|
from inventory import Inventory
|
|
|
|
import knownnodes
|
2017-03-11 11:12:08 +01:00
|
|
|
from network.advanceddispatcher import AdvancedDispatcher
|
2017-10-20 01:21:49 +02:00
|
|
|
from network.dandelion import Dandelion
|
2017-08-22 13:49:27 +02:00
|
|
|
from network.bmobject import BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError, \
|
|
|
|
BMObjectExpiredError, BMObjectUnwantedStreamError, BMObjectInvalidError, BMObjectAlreadyHaveError
|
2017-05-24 16:51:49 +02:00
|
|
|
import network.connectionpool
|
2017-04-16 18:27:15 +02:00
|
|
|
from network.node import Node
|
2017-05-27 19:09:21 +02:00
|
|
|
from network.objectracker import ObjectTracker
|
2017-03-11 11:12:08 +01:00
|
|
|
from network.proxy import Proxy, ProxyError, GeneralProxyError
|
|
|
|
|
|
|
|
import addresses
|
2017-07-05 09:19:18 +02:00
|
|
|
from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue
|
2017-04-16 18:27:15 +02:00
|
|
|
import shared
|
2017-05-24 16:51:49 +02:00
|
|
|
import state
|
2017-03-11 11:12:08 +01:00
|
|
|
import protocol
|
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoError(ProxyError):
    """Base exception for errors in the Bitmessage wire protocol handler."""
    # NOTE(review): missing trailing comma -- this is a plain string, not a
    # 1-tuple; confirm whether ProxyError indexes errorCodes before changing.
    errorCodes = ("Protocol error")
|
2017-03-11 11:12:08 +01:00
|
|
|
|
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoInsufficientDataError(BMProtoError):
    """Raised when the payload is shorter than the decode pattern requires."""
    # NOTE(review): string, not a tuple (missing trailing comma) -- same
    # pattern as the base class.
    errorCodes = ("Insufficient data")
|
2017-05-24 16:51:49 +02:00
|
|
|
|
|
|
|
|
2017-12-29 08:49:08 +01:00
|
|
|
class BMProtoExcessiveDataError(BMProtoError):
    """Raised when a message carries more data than the protocol allows."""
    # NOTE(review): string, not a tuple (missing trailing comma) -- same
    # pattern as the base class.
    errorCodes = ("Too much data")
|
2017-05-24 16:51:49 +02:00
|
|
|
|
|
|
|
|
2017-05-27 19:09:21 +02:00
|
|
|
class BMProto(AdvancedDispatcher, ObjectTracker):
    """
    Bitmessage wire-protocol state machine.

    Parses message headers and payloads from the dispatcher's read buffer
    and dispatches each command to a ``bm_command_<name>`` method.
    """
    # ~1.6 MB which is the maximum possible size of an inv message.
    maxMessageSize = 1600100
    # 2**18 = 256kB is the maximum size of an object payload
    maxObjectPayloadSize = 2**18
    # protocol specification says max 1000 addresses in one addr command
    maxAddrCount = 1000
    # protocol specification says max 50000 objects in one inv command
    maxObjectCount = 50000
    # address is online if online less than this many seconds ago
    addressAlive = 10800
    # maximum time offset (seconds) tolerated in a peer's version message
    maxTimeOffset = 3600
|
|
|
|
|
2017-05-29 00:24:07 +02:00
|
|
|
def __init__(self, address=None, sock=None):
    """
    Initialise the protocol handler.

    address -- not used in this constructor; kept for signature
        compatibility with subclasses (TODO confirm against callers)
    sock -- optional already-open socket passed to AdvancedDispatcher
    """
    # NOTE(review): ObjectTracker.__init__ is not called here; presumably
    # subclasses do that -- verify before relying on tracker state.
    AdvancedDispatcher.__init__(self, sock)
    self.isOutbound = False
    # packet/connection from a local IP
    self.local = False
|
2017-03-20 18:32:26 +01:00
|
|
|
|
|
|
|
def bm_proto_reset(self):
    """Clear per-message parser state so the next header can be parsed."""
    # Fields decoded from the message header.
    self.magic = None
    self.command = None
    self.checksum = None
    # Payload bookkeeping.
    self.payload = None
    self.payloadLength = 0
    self.payloadOffset = 0
    # Parsed object (for "object" commands) and message validity flag.
    self.object = None
    self.invalid = False
    # The next state needs a complete header before it can run.
    self.expectBytes = protocol.Header.size
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def state_bm_header(self):
    """
    Parse a message header from the read buffer.

    On success, transitions to the "bm_command" state expecting the full
    payload. On a bad magic value, resynchronises by skipping one byte
    (datagram peers) or closes the connection (stream peers).
    """
    self.magic, self.command, self.payloadLength, self.checksum = protocol.Header.unpack(self.read_buf[:protocol.Header.size])
    # command field is NUL-padded on the wire
    self.command = self.command.rstrip('\x00')
    if self.magic != 0xE9BEB4D9:
        # skip 1 byte in order to sync
        self.set_state("bm_header", length=1)
        # reset after set_state so expectBytes reflects a fresh header
        self.bm_proto_reset()
        logger.debug("Bad magic")
        if self.socket.type == socket.SOCK_STREAM:
            self.close_reason = "Bad magic"
            self.set_state("close")
        return False
    if self.payloadLength > BMProto.maxMessageSize:
        # oversize message: still consume it, but flag it so
        # state_bm_command rejects it
        self.invalid = True
    self.set_state("bm_command", length=protocol.Header.size, expectBytes=self.payloadLength)
    return True
|
|
|
|
|
|
|
|
def state_bm_command(self):
    """
    Verify the payload checksum and dispatch the parsed command to the
    matching ``bm_command_<name>`` handler.

    Handler-raised protocol/object exceptions are logged and swallowed so
    a single bad message does not kill the connection. Returns False when
    the connection is being closed; True otherwise.
    """
    self.payload = self.read_buf[:self.payloadLength]
    if self.checksum != hashlib.sha512(self.payload).digest()[0:4]:
        logger.debug("Bad checksum, ignoring")
        self.invalid = True
    retval = True
    # only handshake commands are allowed before the handshake completes
    if not self.fullyEstablished and self.command not in ("error", "version", "verack"):
        logger.error("Received command %s before connection was fully established, ignoring", self.command)
        self.invalid = True
    if not self.invalid:
        try:
            # dynamic dispatch: "getdata" -> self.bm_command_getdata(), etc.
            retval = getattr(self, "bm_command_" + str(self.command).lower())()
        except AttributeError:
            # unimplemented command
            logger.debug("unimplemented command %s", self.command)
        except BMProtoInsufficientDataError:
            logger.debug("packet length too short, skipping")
        except BMProtoExcessiveDataError:
            logger.debug("too much data, skipping")
        except BMObjectInsufficientPOWError:
            logger.debug("insufficient PoW, skipping")
        except BMObjectInvalidDataError:
            logger.debug("object invalid data, skipping")
        except BMObjectExpiredError:
            logger.debug("object expired, skipping")
        except BMObjectUnwantedStreamError:
            logger.debug("object not in wanted stream, skipping")
        except BMObjectInvalidError:
            logger.debug("object invalid, skipping")
        except BMObjectAlreadyHaveError:
            logger.debug("%s:%i already got object, skipping", self.destination.host, self.destination.port)
        except struct.error:
            logger.debug("decoding error, skipping")
    elif self.socket.type == socket.SOCK_DGRAM:
        # broken read, ignore
        pass
    else:
        #print "Skipping command %s due to invalid data" % (self.command)
        logger.debug("Closing due to invalid command %s", self.command)
        self.close_reason = "Invalid command %s" % (self.command)
        self.set_state("close")
        return False
    if retval:
        # handler finished with this message: go back to reading a header
        self.set_state("bm_header", length=self.payloadLength)
        self.bm_proto_reset()
    # else assume the command requires a different state to follow
    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-04-16 18:27:15 +02:00
|
|
|
def decode_payload_string(self, length):
    """Return the next *length* bytes of the payload and advance the cursor."""
    start = self.payloadOffset
    end = start + length
    self.payloadOffset = end
    return self.payload[start:end]
|
|
|
|
|
|
|
|
def decode_payload_varint(self):
    """Decode a varint at the current payload cursor, advancing past it."""
    decoded, consumed = addresses.decodeVarint(self.payload[self.payloadOffset:])
    self.payloadOffset += consumed
    return decoded
|
|
|
|
|
|
|
|
def decode_payload_node(self):
    """
    Decode a node announcement (services, 16-byte address, port) from the
    payload and return it as a Node.

    The raw 16-byte wire address is rendered as a dotted-quad string for
    IPv4-mapped addresses, a ".onion" name for the Tor prefix used by
    BMD/bitcoind, or an IPv6 string otherwise.
    """
    services, host, port = self.decode_payload_content("Q16sH")
    # Keep the raw wire bytes; "host" is overwritten with a decoded
    # string below, but the Windows fallback needs the original bytes.
    rawHost = host
    if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
        # IPv4-mapped IPv6: render the final 4 bytes as dotted quad
        host = socket.inet_ntop(socket.AF_INET, str(host[12:16]))
    elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43':
        # Onion, based on BMD/bitcoind
        host = base64.b32encode(host[6:]).lower() + ".onion"
    else:
        host = socket.inet_ntop(socket.AF_INET6, str(host))
    if host == "":
        # This can happen on Windows systems which are not 64-bit compatible
        # so let us drop the IPv6 address.
        # BUGFIX: slice the original wire bytes (rawHost); the original code
        # sliced the already-decoded empty string, which could never work.
        host = socket.inet_ntop(socket.AF_INET, str(rawHost[12:16]))

    return Node(services, host, port)
|
2017-04-16 18:27:15 +02:00
|
|
|
|
|
|
|
def decode_payload_content(self, pattern = "v"):
    """
    Decode the message payload according to *pattern*, returning a (possibly
    nested) list of values. Raises BMProtoInsufficientDataError when the
    cursor runs past the payload end.

    Pattern characters:
    # L = varint indicating the length of the next array
    # l = varint indicating the length of the next item
    # v = varint (or array)
    # H = uint16
    # I = uint32
    # Q = uint64
    # i = net_addr (without time and stream number)
    # s = string
    # 0-9 = length of the next item
    # , = end of array
    """

    def decode_simple(self, char="v"):
        # Decode one fixed-size / varint field at the current cursor.
        # All multi-byte integers are big-endian on the wire.
        if char == "v":
            return self.decode_payload_varint()
        if char == "i":
            return self.decode_payload_node()
        if char == "H":
            self.payloadOffset += 2
            return struct.unpack(">H", self.payload[self.payloadOffset-2:self.payloadOffset])[0]
        if char == "I":
            self.payloadOffset += 4
            return struct.unpack(">I", self.payload[self.payloadOffset-4:self.payloadOffset])[0]
        if char == "Q":
            self.payloadOffset += 8
            return struct.unpack(">Q", self.payload[self.payloadOffset-8:self.payloadOffset])[0]

    size = None
    isArray = False

    # Each parserStack frame is a 6-element list:
    # [0] size
    # [1] iterator starting from size counting to 0
    # [2] isArray?
    # [3] subpattern
    # [4] position of parser in subpattern
    # [5] retval (array)
    parserStack = [[1, 1, False, pattern, 0, []]]

    #try:
    #    sys._getframe(200)
    #    logger.error("Stack depth warning, pattern: %s", pattern)
    #    return
    #except ValueError:
    #    pass

    while True:
        # current pattern character of the innermost frame
        i = parserStack[-1][3][parserStack[-1][4]]
        if i in "0123456789" and (size is None or parserStack[-1][3][parserStack[-1][4]-1] not in "lL"):
            # accumulate a literal decimal length (e.g. "32" in "l32s")
            try:
                size = size * 10 + int(i)
            except TypeError:
                # first digit: size was still None
                size = int(i)
            isArray = False
        elif i in "Ll" and size is None:
            # length comes from a varint in the payload itself
            size = self.decode_payload_varint()
            if i == "L":
                isArray = True
            else:
                isArray = False
        elif size is not None:
            # we have a pending size: push a new frame for the sub-pattern
            if isArray:
                # array: repeat the rest of the pattern "size" times
                parserStack.append([size, size, isArray, parserStack[-1][3][parserStack[-1][4]:], 0, []])
                parserStack[-2][4] = len(parserStack[-2][3])
            else:
                # single item: the sub-pattern is just the next field spec
                for j in range(parserStack[-1][4], len(parserStack[-1][3])):
                    if parserStack[-1][3][j] not in "lL0123456789":
                        break
                parserStack.append([size, size, isArray, parserStack[-1][3][parserStack[-1][4]:j+1], 0, []])
                parserStack[-2][4] += len(parserStack[-1][3]) - 1
            size = None
            continue
        elif i == "s":
            # fixed-length byte string; frame's retval becomes the bytes
            #if parserStack[-2][2]:
            #    parserStack[-1][5].append(self.payload[self.payloadOffset:self.payloadOffset + parserStack[-1][0]])
            #else:
            parserStack[-1][5] = self.payload[self.payloadOffset:self.payloadOffset + parserStack[-1][0]]
            self.payloadOffset += parserStack[-1][0]
            parserStack[-1][1] = 0
            parserStack[-1][2] = True
            #del parserStack[-1]
            size = None
        elif i in "viHIQ":
            parserStack[-1][5].append(decode_simple(self, parserStack[-1][3][parserStack[-1][4]]))
            size = None
        else:
            # unknown pattern character: ignored
            size = None
        # advance the cursor, unwinding finished frames into their parents
        for depth in range(len(parserStack) - 1, -1, -1):
            parserStack[depth][4] += 1
            if parserStack[depth][4] >= len(parserStack[depth][3]):
                # sub-pattern exhausted: one repetition done
                parserStack[depth][1] -= 1
                parserStack[depth][4] = 0
                if depth > 0:
                    # merge the child's results into its parent
                    if parserStack[depth][2]:
                        parserStack[depth - 1][5].append(parserStack[depth][5])
                    else:
                        parserStack[depth - 1][5].extend(parserStack[depth][5])
                    parserStack[depth][5] = []
                if parserStack[depth][1] <= 0:
                    if depth == 0:
                        # we're done, at depth 0 counter is at 0 and pattern is done parsing
                        return parserStack[depth][5]
                    del parserStack[-1]
                    continue
                break
            break
        if self.payloadOffset > self.payloadLength:
            logger.debug("Insufficient data %i/%i", self.payloadOffset, self.payloadLength)
            raise BMProtoInsufficientDataError()
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_error(self):
    """Handle an "error" message from the peer by logging it."""
    fatal, banTime, inventoryVector, text = self.decode_payload_content("vvlsls")
    logger.error(
        "%s:%i error: %i, %s",
        self.destination.host, self.destination.port, fatal, text)
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_getdata(self):
    """
    Handle "getdata": send each requested object back to the peer.

    Aborts (with an anti-intersection delay) on the first object we either
    don't have or only hold as an unoffered dandelion stem, to avoid
    leaking which objects we decline to serve.
    """
    items = self.decode_payload_content("l32s")
    # skip? (rate limiting set elsewhere via skipUntil)
    if time.time() < self.skipUntil:
        return True
    #TODO make this more asynchronous
    random.shuffle(items)
    for i in map(str, items):
        if Dandelion().hasHash(i) and \
                self != Dandelion().objectChildStem(i):
            # stem object requested by a peer we did not offer it to
            self.antiIntersectionDelay()
            logger.info('%s asked for a stem object we didn\'t offer to it.', self.destination)
            break
        else:
            try:
                self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
            except KeyError:
                self.antiIntersectionDelay()
                logger.info('%s asked for an object we don\'t have.', self.destination)
                break
    # I think that aborting after the first missing/stem object is more secure
    # when using random reordering, as the recipient won't know exactly which objects we refuse to deliver
    return True
|
|
|
|
|
2018-02-03 11:46:39 +01:00
|
|
|
def _command_inv(self, dandelion=False):
    """
    Shared implementation for "inv" and "dinv" messages.

    dandelion -- True when handling a dandelion stem announce ("dinv")

    Registers each announced hash with the download tracker (and, for
    stem announces, with the Dandelion router). Raises
    BMProtoExcessiveDataError when the peer announces more hashes than
    the protocol allows.
    """
    items = self.decode_payload_content("l32s")

    if len(items) >= BMProto.maxObjectCount:
        logger.error("Too many items in %sinv message!", "d" if dandelion else "")
        raise BMProtoExcessiveDataError()

    # ignore dinv if dandelion turned off
    if dandelion and not state.dandelion:
        return True

    for i in map(str, items):
        if i in Inventory() and not Dandelion().hasHash(i):
            # already have it and it's not in the stem phase: nothing to do
            continue
        if dandelion and not Dandelion().hasHash(i):
            # record which peer the stem object came from
            Dandelion().addHash(i, self)
        self.handleReceivedInventory(i)

    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2018-02-03 11:46:39 +01:00
|
|
|
def bm_command_inv(self):
    """Handle a regular inventory announcement ("inv")."""
    return self._command_inv(dandelion=False)
|
|
|
|
|
2017-09-25 01:17:04 +02:00
|
|
|
def bm_command_dinv(self):
    """Handle a Dandelion stem announce ("dinv")."""
    return self._command_inv(dandelion=True)
|
2017-09-25 01:17:04 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_object(self):
    """
    Handle an incoming "object" message.

    Validates size, proof of work, expiry, stream membership and
    object-type sanity; stores the object in the Inventory; hands it to
    the object processor; and re-announces it via invQueue. Validation
    failures raise the corresponding BMObject*/BMProto* exception (caught
    by state_bm_command).
    """
    objectOffset = self.payloadOffset
    nonce, expiresTime, objectType, version, streamNumber = self.decode_payload_content("QQIvv")
    self.object = BMObject(nonce, expiresTime, objectType, version, streamNumber, self.payload, self.payloadOffset)

    if len(self.payload) - self.payloadOffset > BMProto.maxObjectPayloadSize:
        # BUGFIX: the original used
        #   '... (%s bytes) ...' % len(self.payload) - self.payloadOffset
        # where % binds tighter than -, subtracting an int from a string
        # and raising TypeError. Use lazy logger args with the intended
        # difference instead.
        logger.info(
            'The payload length of this object is too large (%i bytes). Ignoring it.',
            len(self.payload) - self.payloadOffset)
        raise BMProtoExcessiveDataError()

    # cheap checks first; on failure stop other connections downloading it
    try:
        self.object.checkProofOfWorkSufficient()
        self.object.checkEOLSanity()
        self.object.checkAlreadyHave()
    except (BMObjectExpiredError, BMObjectAlreadyHaveError, BMObjectInsufficientPOWError) as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash)
        raise e
    try:
        self.object.checkStream()
    except (BMObjectUnwantedStreamError,) as e:
        # optionally keep forwarding mismatched-stream objects
        BMProto.stopDownloadingObject(self.object.inventoryHash, BMConfigParser().get("inventory", "acceptmismatch"))
        if not BMConfigParser().get("inventory", "acceptmismatch"):
            raise e

    try:
        self.object.checkObjectByType()
        objectProcessorQueue.put((self.object.objectType, buffer(self.object.data)))
    except BMObjectInvalidError as e:
        BMProto.stopDownloadingObject(self.object.inventoryHash, True)
    else:
        # successfully received: no longer missing
        try:
            del state.missingObjects[self.object.inventoryHash]
        except KeyError:
            pass

    # a stem object coming back to us means the stem looped
    if self.object.inventoryHash in Inventory() and Dandelion().hasHash(self.object.inventoryHash):
        Dandelion().removeHash(self.object.inventoryHash, "cycle detection")

    Inventory()[self.object.inventoryHash] = (
        self.object.objectType, self.object.streamNumber, buffer(self.payload[objectOffset:]), self.object.expiresTime, buffer(self.object.tag))
    self.handleReceivedObject(self.object.streamNumber, self.object.inventoryHash)
    invQueue.put((self.object.streamNumber, self.object.inventoryHash, self.destination))
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-05-27 19:09:21 +02:00
|
|
|
def _decode_addr(self):
    """Decode an "addr" payload into a list of peer entries."""
    # L: varint-counted array; each entry is
    # uint64 time, uint32 stream, uint64 services, 16-byte IP, uint16 port.
    return self.decode_payload_content("LQIQ16sH")
|
2017-05-27 19:09:21 +02:00
|
|
|
|
2017-04-16 18:27:15 +02:00
|
|
|
def bm_command_addr(self):
    """
    Handle an "addr" message: merge fresh, valid peer entries for streams
    we participate in into knownnodes, and forward them via addrQueue.
    """
    # NOTE(review): this local shadows the module-level "addresses" import
    # for the rest of the method -- harmless here, but worth renaming.
    addresses = self._decode_addr()
    for i in addresses:
        seenTime, stream, services, ip, port = i
        decodedIP = protocol.checkIPAddress(str(ip))
        if stream not in state.streamsInWhichIAmParticipating:
            continue
        # only accept addresses seen recently (addressAlive seconds)
        if decodedIP is not False and seenTime > time.time() - BMProto.addressAlive:
            peer = state.Peer(decodedIP, port)
            try:
                # skip if we already have a fresher record for this peer
                if knownnodes.knownNodes[stream][peer]["lastseen"] > seenTime:
                    continue
            except KeyError:
                pass
            if len(knownnodes.knownNodes[stream]) < int(BMConfigParser().get("knownnodes", "maxnodes")):
                with knownnodes.knownNodesLock:
                    try:
                        knownnodes.knownNodes[stream][peer]["lastseen"] = seenTime
                    except (TypeError, KeyError):
                        # new peer (or legacy record format): create entry
                        knownnodes.knownNodes[stream][peer] = {
                            "lastseen": seenTime,
                            "rating": 0,
                            "self": False,
                        }
                addrQueue.put((stream, peer, self.destination))
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-05-25 14:59:18 +02:00
|
|
|
def bm_command_portcheck(self):
    """Queue a request to verify this peer's port is reachable."""
    # NOTE(review): self.destination is itself a Peer; passing it as the
    # host field of a new Peer looks odd -- confirm portCheckerQueue
    # consumers expect this shape.
    portCheckerQueue.put(state.Peer(self.destination, self.peerNode.port))
    return True
|
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_ping(self):
    """Answer a keepalive "ping" by queueing a "pong" packet."""
    pong = protocol.CreatePacket('pong')
    self.append_write_buf(pong)
    return True
|
2017-04-16 18:27:15 +02:00
|
|
|
|
2017-04-04 10:46:01 +02:00
|
|
|
def bm_command_pong(self):
    """Accept a keepalive "pong"; no action is required."""
    return True
|
2017-04-04 10:46:01 +02:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def bm_command_verack(self):
    """
    Handle "verack". Once both sides have acknowledged, move to TLS setup
    (if negotiated) or directly to the fully-established state.

    Returns False when a state transition was queued (stops further
    command processing in this state), True otherwise.
    """
    self.verackReceived = True
    if self.verackSent:
        if self.isSSL:
            self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
            return False
        self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
        return False
    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-03-20 18:32:26 +01:00
|
|
|
def bm_command_version(self):
    """
    Handle the peer's "version" message: decode and record the handshake
    fields, validate the peer, reply with "verack" (and our own "version"
    for inbound connections), and negotiate SSL if both sides support it.

    Returns False when a state transition was queued, True otherwise.
    """
    self.remoteProtocolVersion, self.services, self.timestamp, self.sockNode, self.peerNode, self.nonce, \
        self.userAgent, self.streams = self.decode_payload_content("IQQiiQlsLv")
    # keep the nonce in wire format for self-connection detection
    self.nonce = struct.pack('>Q', self.nonce)
    self.timeOffset = self.timestamp - int(time.time())
    logger.debug("remoteProtocolVersion: %i", self.remoteProtocolVersion)
    logger.debug("services: 0x%08X", self.services)
    logger.debug("time offset: %i", self.timestamp - int(time.time()))
    logger.debug("my external IP: %s", self.sockNode.host)
    logger.debug("remote node incoming address: %s:%i", self.destination.host, self.peerNode.port)
    logger.debug("user agent: %s", self.userAgent)
    logger.debug("streams: [%s]", ",".join(map(str,self.streams)))
    if not self.peerValidityChecks():
        # TODO ABORT
        return True
    #shared.connectedHostsList[self.destination] = self.streams[0]
    self.append_write_buf(protocol.CreatePacket('verack'))
    self.verackSent = True
    if not self.isOutbound:
        # inbound peers get our version message in return
        self.append_write_buf(protocol.assembleVersionMessage(self.destination.host, self.destination.port, \
            network.connectionpool.BMConnectionPool().streams, True, nodeid=self.nodeid))
        #print "%s:%i: Sending version" % (self.destination.host, self.destination.port)
    # enable SSL only if the peer advertises it and we support it locally
    if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
            protocol.haveSSL(not self.isOutbound)):
        self.isSSL = True
    if self.verackReceived:
        if self.isSSL:
            self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
            return False
        self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
        return False
    return True
|
2017-04-04 10:46:01 +02:00
|
|
|
|
|
|
|
def peerValidityChecks(self):
    """
    Validate the peer based on its "version" message. Queues a fatal
    error reply and returns False when the connection should be dropped;
    returns True when the peer is acceptable.
    """
    if self.remoteProtocolVersion < 3:
        self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
            errorText="Your is using an old protocol. Closing connection."))
        # NOTE(review): the error text above has a typo ("Your is using");
        # left untouched here since it is a runtime string.
        logger.debug('Closing connection to old protocol version %s, node: %s',
            str(self.remoteProtocolVersion), str(self.destination))
        return False
    if self.timeOffset > BMProto.maxTimeOffset:
        self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
            errorText="Your time is too far in the future compared to mine. Closing connection."))
        logger.info("%s's time is too far in the future (%s seconds). Closing connection to it.",
            self.destination, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        return False
    elif self.timeOffset < -BMProto.maxTimeOffset:
        self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
            errorText="Your time is too far in the past compared to mine. Closing connection."))
        logger.info("%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.",
            self.destination, self.timeOffset)
        shared.timeOffsetWrongCount += 1
        return False
    else:
        shared.timeOffsetWrongCount = 0
    if not self.streams:
        self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
            errorText="We don't have shared stream interests. Closing connection."))
        logger.debug('Closed connection to %s because there is no overlapping interest in streams.',
            str(self.destination))
        return False
    if self.destination in network.connectionpool.BMConnectionPool().inboundConnections:
        try:
            # SOCKS proxies can legitimately share an IP
            if not protocol.checkSocksIP(self.destination.host):
                self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                    errorText="Too many connections from your IP. Closing connection."))
                logger.debug('Closed connection to %s because we are already connected to that IP.',
                    str(self.destination))
                return False
        except:
            # NOTE(review): bare except hides all failures here, including
            # programming errors -- consider narrowing the exception type.
            pass
    if not self.isOutbound:
        # incoming from a peer we're connected to as outbound, or server full
        # report the same error to counter deanonymisation
        if state.Peer(self.destination.host, self.peerNode.port) in \
                network.connectionpool.BMConnectionPool().inboundConnections or \
                len(network.connectionpool.BMConnectionPool().inboundConnections) + \
                len(network.connectionpool.BMConnectionPool().outboundConnections) > \
                BMConfigParser().safeGetInt("bitmessagesettings", "maxtotalconnections") + \
                BMConfigParser().safeGetInt("bitmessagesettings", "maxbootstrapconnections"):
            self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
                errorText="Server full, please try again later."))
            logger.debug("Closed connection to %s due to server full or duplicate inbound/outbound.",
                str(self.destination))
            return False
    if network.connectionpool.BMConnectionPool().isAlreadyConnected(self.nonce):
        self.append_write_buf(protocol.assembleErrorMessage(fatal=2,
            errorText="I'm connected to myself. Closing connection."))
        logger.debug("Closed connection to %s because I'm connected to myself.",
            str(self.destination))
        return False

    return True
|
2017-03-11 11:12:08 +01:00
|
|
|
|
2017-05-27 19:09:21 +02:00
|
|
|
@staticmethod
def assembleAddr(peerList):
    """
    Serialize *peerList* -- a single state.Peer or a sequence of
    (stream, peer, timestamp) entries -- into one or more "addr" packets,
    each carrying at most BMProto.maxAddrCount addresses.
    Returns the concatenated packet bytes (b'' for an empty list).
    """
    if isinstance(peerList, state.Peer):
        # BUGFIX: "(peerList)" is just a parenthesised expression, not a
        # tuple, so a single Peer was never wrapped; use a list so the
        # slicing below works.
        peerList = [peerList]
    if not peerList:
        return b''
    retval = b''
    for i in range(0, len(peerList), BMProto.maxAddrCount):
        payload = addresses.encodeVarint(len(peerList[i:i + BMProto.maxAddrCount]))
        for address in peerList[i:i + BMProto.maxAddrCount]:
            stream, peer, timestamp = address
            payload += struct.pack(
                '>Q', timestamp)  # 64-bit time
            payload += struct.pack('>I', stream)
            payload += struct.pack(
                '>q', 1)  # service bit flags offered by this node
            payload += protocol.encodeHost(peer.host)
            payload += struct.pack('>H', peer.port)  # remote port
        retval += protocol.CreatePacket('addr', payload)
    return retval
|
2017-04-04 10:46:01 +02:00
|
|
|
|
2017-07-05 09:25:49 +02:00
|
|
|
@staticmethod
def stopDownloadingObject(hashId, forwardAnyway=False):
    """
    Stop every connection from downloading (and, unless *forwardAnyway*,
    from offering) the object identified by *hashId*, and drop it from
    the missing-objects map.
    """
    # Python 2 dict.values() returns lists, so + concatenates both pools.
    for connection in network.connectionpool.BMConnectionPool().inboundConnections.values() + \
            network.connectionpool.BMConnectionPool().outboundConnections.values():
        try:
            del connection.objectsNewToMe[hashId]
        except KeyError:
            pass
        if not forwardAnyway:
            try:
                with connection.objectsNewToThemLock:
                    del connection.objectsNewToThem[hashId]
            except KeyError:
                pass
    try:
        del state.missingObjects[hashId]
    except KeyError:
        pass
|
2017-07-05 09:25:49 +02:00
|
|
|
|
2017-10-19 09:08:05 +02:00
|
|
|
def handle_close(self):
    """Log why the connection is closing and delegate to the dispatcher."""
    self.set_state("close")
    if not (self.accepting or self.connecting or self.connected):
        # already disconnected
        return
    # destination/close_reason may not be set yet, hence the fallbacks
    try:
        logger.debug("%s:%i: closing, %s", self.destination.host, self.destination.port, self.close_reason)
    except AttributeError:
        try:
            logger.debug("%s:%i: closing", self.destination.host, self.destination.port)
        except AttributeError:
            logger.debug("Disconnected socket closing")
    AdvancedDispatcher.handle_close(self)
|