# (non-source page metadata from the hosting viewer: "880 lines / 46 KiB / Raw Normal View History")

# Master switch for the timing-attack mitigation sleep applied after
# processing an object (see _sleepForTimingAttackMitigation); disabled.
doTimingAttackMitigation = False
# blame-view timestamp artifact (2016-03-18 15:39:29 +00:00) -- not source code
import base64
import datetime
import errno
import math
import time
import threading
import shared
import hashlib
import os
import Queue
import select
import socket
import random
import ssl
from struct import unpack, pack
import sys
# blame-view timestamp artifact (2014-08-27 07:14:32 +00:00) -- not source code
import traceback
# blame-view timestamp artifact (2016-03-23 22:26:57 +00:00) -- not source code
from binascii import hexlify
#import string
#from subprocess import call # used when the API must execute an outside program
#from pyelliptic.openssl import OpenSSL
#import highlevelcrypto
from addresses import *
from bmconfigparser import BMConfigParser
from class_objectHashHolder import objectHashHolder
from helper_generic import addDataPadding, isHostInPrivateIPRange
# blame-view timestamp artifact (2015-03-09 06:35:32 +00:00) -- not source code
from helper_sql import sqlQuery
import knownnodes
from debug import logger
import paths
import protocol
from inventory import Inventory, PendingDownloadQueue, PendingUpload
import queues
import state
import throttle
import tr
from version import softwareVersion
# This thread is created either by the synSenderThread(for outgoing
# connections) or the singleListenerThread(for incoming connections).
class receiveDataThread(threading.Thread):
def __init__(self):
    """One thread per connected peer; receives and parses its messages."""
    threading.Thread.__init__(self, name="receiveData")
    # Raw bytes received from the peer, consumed by processData().
    # (In the corrupted source this assignment was fused onto the line above.) = ''
    self.verackSent = False
    self.verackReceived = False
# NOTE(review): this method is a corrupted transcription from a blame view:
# the parameter list after "def setup(" is missing, commit timestamps are
# interleaved, and several statements are merged or truncated (e.g. the
# lines and the self.hostIdent assignment). Not runnable as shown;
# comments below describe only what the visible code demonstrates.
def setup(
2013-12-30 03:36:23 +00:00
2013-12-30 03:36:23 +00:00
self.sock = sock
# identity of the remote peer -- presumably a (host, port) named tuple; TODO confirm
self.peer = state.Peer(HOST, port) = "receiveData-" +":", ".") # ":" log parser field separator
self.streamNumber = state.streamsInWhichIAmParticipating
self.remoteStreams = []
self.selfInitiatedConnections = selfInitiatedConnections
2013-12-30 03:36:23 +00:00
self.sendDataThreadQueue = sendDataThreadQueue # used to send commands and data to the sendDataThread
# NOTE(review): the conditional expression below is truncated after "else"
self.hostIdent = self.peer.port if ".onion" in BMConfigParser().get('bitmessagesettings', 'onionhostname') and protocol.checkSocksIP( else
self.hostIdent] = 0 # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
self.connectionIsOrWasFullyEstablished = False # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections. = 0
# streamNumber == -1 marks an incoming connection (we wait for the peer's version)
if streamNumber == -1: # This was an incoming connection. Send out a version message if we accept the other node's version message.
self.initiatedConnection = False
# NOTE(review): an "else:" line appears to be missing before this branch
self.initiatedConnection = True
for stream in self.streamNumber:
self.selfInitiatedConnections[stream][self] = 0
self.objectHashHolderInstance = objectHashHolderInstance
# queue of inventory hashes we still need to fetch from this peer
self.downloadQueue = PendingDownloadQueue()
self.startTime = time.time()
# NOTE(review): corrupted transcription -- "try:"/"else:"/"break"/"return"
# lines and commit timestamps are missing or interleaved; not runnable as
# shown. The visible intent: loop reading chunks from the (possibly TLS)
# socket into and hand complete messages to processData().
def run(self):
logger.debug('receiveDataThread starting. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
while state.shutdown == 0:
dataLen = len(
2017-02-07 12:00:24 +00:00
isSSL = False
# use the TLS socket only when both sides advertised NODE_SSL and the
# plain-text handshake phase is already complete
if (( & protocol.NODE_SSL == protocol.NODE_SSL) and
self.connectionIsOrWasFullyEstablished and
protocol.haveSSL(not self.initiatedConnection)):
2017-02-07 12:00:24 +00:00
isSSL = True
dataRecv = self.sslSock.recv(throttle.ReceiveThrottle().chunkSize)
# NOTE(review): an "else:" presumably separated the two recv calls
dataRecv = self.sock.recv(throttle.ReceiveThrottle().chunkSize) += dataRecv
except socket.timeout:
# idle timeout: ping an established peer to keep the link alive
if self.connectionIsOrWasFullyEstablished:
self.sendping("Still around!")
logger.error("Timeout during protocol initialisation")
2017-02-07 12:00:24 +00:00
except ssl.SSLError as err:
if err.errno == ssl.SSL_ERROR_WANT_READ:
# wait (up to 10s) for the socket to become readable, then retry[self.sslSock], [], [], 10)
logger.debug('sock.recv retriable SSL error')
if err.errno is None and 'timed out' in str(err):
if self.connectionIsOrWasFullyEstablished:
self.sendping("Still around!")
logger.error ('SSL error: %i/%s', err.errno if err.errno else 0, str(err))
2017-02-07 12:00:24 +00:00
except socket.error as err:
# non-blocking sockets signal "no data yet" via EAGAIN/EWOULDBLOCK
if err.errno in (errno.EAGAIN, errno.EWOULDBLOCK) or \
(sys.platform.startswith('win') and \
2017-02-17 20:14:39 +00:00
err.errno == errno.WSAEWOULDBLOCK):
2017-02-07 12:00:24 +00:00[self.sslSock if isSSL else self.sock], [], [], 10)
logger.debug('sock.recv retriable error')
logger.error('sock.recv error. Closing receiveData thread, %s', str(err))
# print 'Received', repr(
2013-07-05 20:56:49 +00:00
if len( == dataLen: # If self.sock.recv returned no data:
logger.debug('Connection to ' + str(self.peer) + ' closed. Closing receiveData thread')
# NOTE(review): an "else: self.processData()" branch is presumably missing here
# --- cleanup after the loop exits ---
for stream in self.streamNumber:
del self.selfInitiatedConnections[stream][self]
except KeyError:
logger.debug('removed self (a receiveDataThread) from selfInitiatedConnections')
self.sendDataThreadQueue.put((0, 'shutdown','no data')) # commands the corresponding sendDataThread to shut itself down.
del shared.connectedHostsList[self.hostIdent]
except Exception as err:
logger.error('Could not delete ' + str(self.hostIdent) + ' from shared.connectedHostsList.' + str(err))
queues.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
logger.debug('receiveDataThread ending. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
def antiIntersectionDelay(self, initial = False):
    """
    Sleep for a short, network-size-dependent interval to frustrate
    intersection attacks.  With initial=True, only sleeps for whatever
    remains of that interval since the connection started.
    """
    # estimated time for a small object to propagate across the whole network
    # take the stream with maximum amount of nodes
    # +2 is to avoid problems with log(0) and log(1)
    # 20 is avg connected nodes count
    # 0.2 is avg message transmission time
    delay = math.ceil(math.log(max(len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes) + 2, 20)) * (0.2 + objectHashHolder.size/2)
    now = time.time()
    if initial and now - delay < self.startTime:
        logger.debug("Initial sleeping for %.2fs", delay - (now - self.startTime))
        time.sleep(delay - (now - self.startTime))
    elif not initial:
        logger.debug("Sleeping due to missing object for %.2fs", delay)
        # restored: the sleep announced by the log line above was missing
        time.sleep(delay)
def checkTimeOffsetNotification(self):
if shared.timeOffsetWrongCount >= 4 and not self.connectionIsOrWasFullyEstablished:
queues.UISignalQueue.put(('updateStatusBar', tr._translate("MainWindow", "The time on your computer, %1, may be wrong. Please verify your settings.").arg("%H:%M:%S"))))
# NOTE(review): corrupted transcription -- the "return" statements, the
# per-command dispatch calls (self.recversion(payload) etc.), the "try:"
# lines and the trailing recursion are missing, and several statements are
# merged onto one line. Not runnable as shown.
def processData(self):
# wait until at least a full header has arrived
if len( < protocol.Header.size: # if so little of the data has arrived that we can't even read the checksum then wait for more data.
magic,command,payloadLength,checksum = protocol.Header.unpack([:protocol.Header.size])
# 0xE9BEB4D9 is the Bitmessage wire-protocol magic; anything else means
# we are out of sync with the peer, so the buffer is discarded
if magic != 0xE9BEB4D9: = ""
if payloadLength > 1600100: # ~1.6 MB which is the maximum possible size of an inv message.'The incoming message, which we have not yet download, is too large. Ignoring it. (unfortunately there is no way to tell the other node to stop sending it except to disconnect.) Message size: %s' % payloadLength) =[payloadLength + protocol.Header.size:]
del magic,command,payloadLength,checksum # we don't need these anymore and better to clean them now before the recursive call rather than after
if len( < payloadLength + protocol.Header.size: # check if the whole message has arrived yet.
payload =[protocol.Header.size:payloadLength + protocol.Header.size]
2014-07-14 22:01:56 +00:00
# first 4 bytes of sha512(payload) is the wire checksum
if checksum != hashlib.sha512(payload).digest()[0:4]: # test the checksum in the message.
logger.error('Checksum incorrect. Clearing this message.') =[payloadLength + protocol.Header.size:]
del magic,command,payloadLength,checksum,payload # better to clean up before the recursive call
# The time we've last seen this node is obviously right now since we
# just received valid data from it. So update the knownNodes list so
# that other peers can be made aware of its existance.
if self.initiatedConnection and self.connectionIsOrWasFullyEstablished: # The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
with knownnodes.knownNodesLock:
for stream in self.streamNumber:
knownnodes.knownNodes[stream][self.peer] = int(time.time())
#Strip the nulls
command = command.rstrip('\x00')
logger.debug('remoteCommand ' + repr(command) + ' from ' + str(self.peer))
2014-08-27 07:14:32 +00:00
#TODO: Use a dispatcher here
# NOTE(review): each branch below originally invoked the matching
# self.rec<command>(payload) / self.send... handler; the calls are missing
if command == 'error':
elif not self.connectionIsOrWasFullyEstablished:
2014-08-27 07:14:32 +00:00
if command == 'version':
elif command == 'verack':
if command == 'addr':
elif command == 'inv':
elif command == 'getdata':
elif command == 'object':
elif command == 'ping':
elif command == 'pong':
else:"Unknown command %s, ignoring", command)
2014-08-27 07:14:32 +00:00
except varintDecodeError as e:
logger.debug("There was a problem with a varint while processing a message from the wire. Some details: %s" % e)
except Exception as e:
logger.critical("Critical error in a receiveDataThread: \n%s" % traceback.format_exc())
del payload =[payloadLength + protocol.Header.size:] # take this message out and then process the next message
if == '': # if there are no more messages
# refill our pending-download request batch (up to 100 outstanding)
toRequest = []
for i in range(len(self.downloadQueue.pending), 100):
while True:
hashId = self.downloadQueue.get(False)
if not hashId in Inventory():
# don't track download for duplicates
except Queue.Empty:
if len(toRequest) > 0:
def sendpong(self, payload):
    """Queue a pong reply carrying *payload* for the send thread."""
    logger.debug('Sending pong')
    packet = protocol.CreatePacket('pong', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
def sendping(self, payload):
    """Queue a ping carrying *payload* for the send thread."""
    logger.debug('Sending ping')
    packet = protocol.CreatePacket('ping', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', packet))
def recverack(self):
    """
    Handle a verack from the peer.  Once a verack has gone in both
    directions the handshake is complete and the connection can be
    promoted to fully established.
    """
    logger.debug('verack received')
    self.verackReceived = True
    if self.verackSent:
        # We have thus both sent and received a verack.
        # restored: the body of this branch was missing in the corrupted source
        self.connectionFullyEstablished()
# NOTE(review): corrupted transcription -- the "try:"/"else:" lines, the
# self.sslSock.do_handshake() call and the loop-exit "break" are missing,
# and commit timestamps are interleaved. Not runnable as shown.
def sslHandshake(self):
self.sslSock = self.sock
# attempt TLS only when both sides advertised NODE_SSL and our Python
# build supports the required role (server vs client)
if (( & protocol.NODE_SSL == protocol.NODE_SSL) and
protocol.haveSSL(not self.initiatedConnection)):
2015-11-22 21:44:58 +00:00
logger.debug("Initialising TLS")
if sys.version_info >= (2,7,9):
# modern path: SSLContext with peer verification disabled (Bitmessage
# uses TLS for privacy, not peer authentication)
context = ssl.SSLContext(protocol.sslProtocolVersion)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
# also exclude TLSv1 and TLSv1.1 in the future
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
self.sslSock = context.wrap_socket(self.sock, server_side = not self.initiatedConnection, do_handshake_on_connect=False)
# NOTE(review): an "else:" presumably separated the legacy wrap_socket path
self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=protocol.sslProtocolVersion, do_handshake_on_connect=False, ciphers=protocol.sslProtocolCiphers)
# drive the non-blocking handshake to completion, waiting on select()
# whenever OpenSSL reports it wants to read or write
while True:
logger.debug("TLS handshake success")
2017-05-07 18:15:57 +00:00
if sys.version_info >= (2, 7, 9):
logger.debug("TLS protocol version: %s", self.sslSock.version())
except ssl.SSLError as e:
if sys.hexversion >= 0x02070900:
if isinstance (e, ssl.SSLWantReadError):
logger.debug("Waiting for SSL socket handhake read")[self.sslSock], [], [], 10)
elif isinstance (e, ssl.SSLWantWriteError):
logger.debug("Waiting for SSL socket handhake write")[], [self.sslSock], [], 10)
# pre-2.7.9 fallback: errno-style want-read/want-write detection
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
logger.debug("Waiting for SSL socket handhake read")[self.sslSock], [], [], 10)
elif e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
logger.debug("Waiting for SSL socket handhake write")[], [self.sslSock], [], 10)
logger.error("SSL socket handhake failed: shutting down connection, %s", str(e))
self.sendDataThreadQueue.put((0, 'shutdown','tls handshake fail %s' % (str(e))))
return False
except socket.error as err:
logger.debug('SSL socket handshake failed, shutting down connection, %s', str(err))
self.sendDataThreadQueue.put((0, 'shutdown','tls handshake fail'))
return False
except Exception:
logger.error("SSL socket handhake failed, shutting down connection", exc_info=True)
self.sendDataThreadQueue.put((0, 'shutdown','tls handshake fail'))
return False
# SSL in the background should be blocking, otherwise the error handling is difficult
return True
# no SSL
return True
def peerValidityChecks(self):
if self.remoteProtocolVersion < 3:
2017-02-07 19:09:11 +00:00
self.sendDataThreadQueue.put((0, 'sendRawData',protocol.assembleErrorMessage(
fatal=2, errorText="Your is using an old protocol. Closing connection.")))
logger.debug ('Closing connection to old protocol version ' + str(self.remoteProtocolVersion) + ' node: ' + str(self.peer))
return False
if self.timeOffset > 3600:
2017-02-07 19:09:11 +00:00
self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
fatal=2, errorText="Your time is too far in the future compared to mine. Closing connection.")))"%s's time is too far in the future (%s seconds). Closing connection to it.", self.peer, self.timeOffset)
shared.timeOffsetWrongCount += 1
return False
elif self.timeOffset < -3600:
2017-02-07 19:09:11 +00:00
self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
fatal=2, errorText="Your time is too far in the past compared to mine. Closing connection.")))"%s's time is too far in the past (timeOffset %s seconds). Closing connection to it.", self.peer, self.timeOffset)
shared.timeOffsetWrongCount += 1
return False
shared.timeOffsetWrongCount = 0
if len(self.streamNumber) == 0:
2017-02-07 19:09:11 +00:00
self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(
fatal=2, errorText="We don't have shared stream interests. Closing connection.")))
logger.debug ('Closed connection to ' + str(self.peer) + ' because there is no overlapping interest in streams.')
return False
return True
# NOTE(review): corrupted transcription -- the "return" statements, the
# queue-put call on the advertisepeer line, the verack/big-inv sends and an
# "else:" for the server-full branch appear to be missing. Not runnable as
# shown; comments describe visible intent only.
def connectionFullyEstablished(self):
if self.connectionIsOrWasFullyEstablished:
# there is no reason to run this function a second time
# NOTE(review): a "return" is presumably missing here
if not self.sslHandshake():
if self.peerValidityChecks() == False:
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
self.connectionIsOrWasFullyEstablished = True
shared.timeOffsetWrongCount = 0
# Command the corresponding sendDataThread to set its own connectionIsOrWasFullyEstablished variable to True also
2015-11-22 21:44:58 +00:00
self.sendDataThreadQueue.put((0, 'connectionIsOrWasFullyEstablished', (, self.sslSock)))
if not self.initiatedConnection:
# an inbound connection proves we are reachable from the internet
shared.clientHasReceivedIncomingConnections = True
queues.UISignalQueue.put(('setStatusIcon', 'green'))
# NOTE(review): the settimeout call owning this argument is truncated 600) # We'll send out a ping every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
queues.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
'The length of sendDataQueues is now: ' + str(len(state.sendDataQueues)) + "\n" + \
'broadcasting addr from within connectionFullyEstablished function.')
2017-01-12 18:18:56 +00:00
if self.initiatedConnection:
state.networkProtocolAvailability[protocol.networkType(] = True
# we need to send our own objects to this node
# Let all of our peers know about this new node.
for stream in self.remoteStreams:
dataToSend = (int(time.time()), stream,,, self.remoteNodeIncomingPort)
# NOTE(review): the queue-put call wrapping these arguments is truncated
stream, 'advertisepeer', dataToSend))
self.sendaddr() # This is one large addr message to this one peer.
if len(shared.connectedHostsList) > \
BMConfigParser().safeGetInt("bitmessagesettings", "maxtotalconnections", 200): ('We are connected to too many people. Closing connection.')
if self.initiatedConnection:
self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(fatal=2, errorText="Thank you for providing a listening node.")))
# NOTE(review): an "else:" presumably separated the two error messages
self.sendDataThreadQueue.put((0, 'sendRawData', protocol.assembleErrorMessage(fatal=2, errorText="Server full, please try again later.")))
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
def sendBigInv(self):
    """
    Advertise every unexpired object we hold in this peer's streams,
    batched into inv messages of at most 50000 hashes each.  Hashes
    currently parked in the objectHashHolder are skipped.  (The two
    sendinvMessageToJustThisOnePeer call prefixes were truncated in the
    corrupted source and are restored here.)
    """
    # Select all hashes for objects in this stream.
    bigInvList = {}
    for stream in self.streamNumber:
        for hash in Inventory().unexpired_hashes_by_stream(stream):
            if not self.objectHashHolderInstance.hasHash(hash):
                bigInvList[hash] = 0
    numberOfObjectsInInvMessage = 0
    payload = ''
    # Now let us start appending all of these hashes together. They will be
    # sent out in a big inv message to our new peer.
    for hash, storedValue in bigInvList.items():
        payload += hash
        numberOfObjectsInInvMessage += 1
        if numberOfObjectsInInvMessage == 50000: # We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
            self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage, payload)
            payload = ''
            numberOfObjectsInInvMessage = 0
    if numberOfObjectsInInvMessage > 0:
        self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage, payload)
# Used to send a big inv message when the connection with a node is
# first fully established. Notice that there is also a broadcastinv
# function for broadcasting invs to everyone in our stream.
def sendinvMessageToJustThisOnePeer(self, numberOfObjects, payload):
    """Send an inv listing *numberOfObjects* hashes to this peer only."""
    full_payload = encodeVarint(numberOfObjects) + payload
    logger.debug('Sending huge inv message with ' + str(numberOfObjects) + ' objects to just this one peer')
    self.sendDataThreadQueue.put((0, 'sendRawData', protocol.CreatePacket('inv', full_payload)))
def _sleepForTimingAttackMitigation(self, sleepTime):
    """
    Sleep for *sleepTime* seconds (when positive and the mitigation is
    enabled) so that object-processing time cannot be observed by a
    remote attacker.  The sleep call itself was missing in the
    corrupted source and is restored here.
    """
    # We don't need to do the timing attack mitigation if we are
    # only connected to the trusted peer because we can trust the
    # peer not to attack
    if sleepTime > 0 and doTimingAttackMitigation and state.trustedPeer == None:
        logger.debug('Timing attack mitigation: Sleeping for ' + str(sleepTime) + ' seconds.')
        time.sleep(sleepTime)
def recerror(self, data):
    """
    The remote node has been polite enough to send you an error message.
    Parse the fatal status, ban time, optional inventory vector and
    error text, then log the assembled message.  (Docstring quotes,
    the early returns and the final log call were missing in the
    corrupted source; restored here.)
    """
    fatalStatus, readPosition = decodeVarint(data[:10])
    banTime, banTimeLength = decodeVarint(data[readPosition:readPosition+10])
    readPosition += banTimeLength
    inventoryVectorLength, inventoryVectorLengthLength = decodeVarint(data[readPosition:readPosition+10])
    if inventoryVectorLength > 100:
        # implausibly long inventory vector; ignore the whole message
        return
    readPosition += inventoryVectorLengthLength
    inventoryVector = data[readPosition:readPosition+inventoryVectorLength]
    readPosition += inventoryVectorLength
    errorTextLength, errorTextLengthLength = decodeVarint(data[readPosition:readPosition+10])
    if errorTextLength > 1000:
        # implausibly long error text; ignore the whole message
        return
    readPosition += errorTextLengthLength
    errorText = data[readPosition:readPosition+errorTextLength]
    if fatalStatus == 0:
        fatalHumanFriendly = 'Warning'
    elif fatalStatus == 1:
        fatalHumanFriendly = 'Error'
    elif fatalStatus == 2:
        fatalHumanFriendly = 'Fatal'
    else:
        # robustness: avoid an unbound name on an unexpected status value
        fatalHumanFriendly = 'Unknown'
    message = '%s message received from %s: %s.' % (fatalHumanFriendly, self.peer, errorText)
    if inventoryVector:
        message += " This concerns object %s" % hexlify(inventoryVector)
    if banTime > 0:
        message += " Remote node says that the ban time is %s" % banTime
    logger.error(message)
# blame-view timestamp artifact (2014-08-27 07:14:32 +00:00) -- not source code
def recobject(self, data):
    """
    Handle a received 'object' message: validate and share it with our
    peers, then sleep for the remainder of the processing budget as a
    timing-attack mitigation.  (The explanation below was bare invalid
    text in the corrupted source and the trailing calls were missing;
    restored as comments/code — confirm against upstream.)
    """
    self.messageProcessingStartTime = time.time()
    lengthOfTimeWeShouldUseToProcessThisMessage = shared.checkAndShareObjectWithPeers(data)
    # the object is no longer pending download from this peer
    self.downloadQueue.task_done(calculateInventoryHash(data))
    # Sleeping will help guarantee that we can process messages faster than a
    # remote node can send them. If we fall behind, the attacker could observe
    # that we are are slowing down the rate at which we request objects from the
    # network which would indicate that we own a particular address (whichever
    # one to which they are sending all of their attack messages). Note
    # that if an attacker connects to a target with many connections, this
    # mitigation mechanism might not be sufficient.
    sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time() - self.messageProcessingStartTime)
    self._sleepForTimingAttackMitigation(sleepTime)
# We have received an inv message
def recinv(self, data):
numberOfItemsInInv, lengthOfVarint = decodeVarint(data[:10])
if numberOfItemsInInv > 50000:
sys.stderr.write('Too many items in inv message!')
if len(data) < lengthOfVarint + (numberOfItemsInInv * 32):'inv message doesn\'t contain enough data. Ignoring.')
startTime = time.time()
advertisedSet = set()
for i in range(numberOfItemsInInv):
advertisedSet.add(data[lengthOfVarint + (32 * i):32 + lengthOfVarint + (32 * i)])
objectsNewToMe = advertisedSet
for stream in self.streamNumber:
objectsNewToMe -= Inventory().hashes_by_stream(stream)'inv message lists %s objects. Of those %s are new to me. It took %s seconds to figure that out.', numberOfItemsInInv, len(objectsNewToMe), time.time()-startTime)
for item in random.sample(objectsNewToMe, len(objectsNewToMe)):
# Send a getdata message to our peer to request the object with the given
# hash
def sendgetdata(self, hashes):
    """
    Send a getdata message asking this peer for the objects whose
    inventory *hashes* are given.  No-op for an empty list (the early
    return was missing in the corrupted source).
    """
    if len(hashes) == 0:
        return
    logger.debug('sending getdata to retrieve %i objects', len(hashes))
    payload = encodeVarint(len(hashes)) + ''.join(hashes)
    self.sendDataThreadQueue.put((0, 'sendRawData', protocol.CreatePacket('getdata', payload)), False)
# We have received a getdata request from our peer
def recgetdata(self, data):
    """
    Answer a peer's getdata request by sending each requested object we
    hold; unknown or held-back objects trigger the anti-intersection
    delay instead.  (The early return and the branch bodies were
    missing in the corrupted source — confirm reconstruction against
    upstream.)
    """
    numberOfRequestedInventoryItems, lengthOfVarint = decodeVarint(data[:10])
    if len(data) < lengthOfVarint + (32 * numberOfRequestedInventoryItems):
        logger.debug('getdata message does not contain enough data. Ignoring.')
        return
    self.antiIntersectionDelay(True) # only handle getdata requests if we have been connected long enough
    for i in xrange(numberOfRequestedInventoryItems):
        hash = data[lengthOfVarint + (
            i * 32):32 + lengthOfVarint + (i * 32)]
        logger.debug('received getdata request for item:' + hexlify(hash))
        if self.objectHashHolderInstance.hasHash(hash):
            # object is parked in the hash holder awaiting batched send; delay
            self.antiIntersectionDelay()
        else:
            if hash in Inventory():
                self.sendObject(hash, Inventory()[hash].payload)
            else:
                self.antiIntersectionDelay()
                logger.warning('%s asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. We probably cleaned it out after advertising it but before they got around to asking for it.' % (self.peer,))
# Our peer has requested (in a getdata message) that we send an object.
def sendObject(self, hash, payload):
    """Queue one inventory object for transmission to this peer."""
    logger.debug('sending an object.')
    packet = protocol.CreatePacket('object', payload)
    self.sendDataThreadQueue.put((0, 'sendRawData', (hash, packet)))