commit 9fb8fda02d
Author: Carlos Killpack
Date:   2013-06-22 16:50:38 -06:00


@@ -44,6 +44,7 @@ from subprocess import call # used when the API must execute an outside program
 import singleton
 import proofofwork
 # Logging
+# TODO(fiatflux): Add option to suppress extra logging info (e.g. tracebacks) in stdout/stderr.
 from debug import logger
 # Classes
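The hunks that follow replace print statements and eagerly interpolated log strings with calls to this logger. A minimal sketch of the pattern being adopted (the logger name and basicConfig call here are illustrative, not taken from debug.py):

    import logging

    logger = logging.getLogger('bitmessage')  # illustrative name; debug.py configures its own logger
    logging.basicConfig(level=logging.DEBUG)

    host, port = '203.0.113.7', 8444  # placeholder peer

    # Eager interpolation: the message string is built even when DEBUG is disabled.
    logger.debug('Trying an outgoing connection to %s:%s' % (host, port))

    # Lazy formatting: logging interpolates only if the record is actually emitted.
    logger.debug('Trying an outgoing connection to %s:%s', host, port)

    # Inside an except block, logger.exception() logs at ERROR level and appends
    # the traceback, which is why several handlers below switch to it.
    try:
        raise IOError('connection refused')
    except IOError:
        logger.exception('Could NOT connect during outgoing attempt.')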
@@ -74,7 +75,8 @@ class outgoingSynSender(threading.Thread):
 time.sleep(1)
 global alreadyAttemptedConnectionsListResetTime
 while True:
-while len(selfInitiatedConnections[self.streamNumber]) >= 8: # maximum number of outgoing connections = 8
+# Maximum number of outgoing connections = 8.
+while len(selfInitiatedConnections[self.streamNumber]) >= 8:
 time.sleep(10)
 if shared.shutdown:
 break
@@ -85,7 +87,7 @@ class outgoingSynSender(threading.Thread):
 alreadyAttemptedConnectionsListLock.acquire()
 while HOST in alreadyAttemptedConnectionsList or HOST in shared.connectedHostsList:
 alreadyAttemptedConnectionsListLock.release()
-# print 'choosing new sample'
+logger.debug('Choosing new sample.')
 random.seed()
 shared.knownNodesLock.acquire()
 HOST, = random.sample(shared.knownNodes[self.streamNumber], 1)
@@ -109,16 +111,17 @@ class outgoingSynSender(threading.Thread):
 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
 sock.settimeout(20)
 if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none':
-logger.info('Trying an outgoing connection to %s:%s' % (HOST, PORT))
-# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+logger.debug('Trying an outgoing connection to %s:%s' % (HOST, PORT))
 elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
-logger.debug('(Using SOCKS4) Trying an outgoing connection to %s:%s.' % (HOST, PORT))
+logger.debug('(Using SOCKS4a) Trying an outgoing connection to %s:%s' % (HOST, PORT))
 proxytype = socks.PROXY_TYPE_SOCKS4
 sockshostname = shared.config.get(
 'bitmessagesettings', 'sockshostname')
 socksport = shared.config.getint(
 'bitmessagesettings', 'socksport')
-rdns = True # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
+# Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be
+# doing any domain name lookups anyway.
+rdns = True
 if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
 socksusername = shared.config.get(
 'bitmessagesettings', 'socksusername')
@@ -130,13 +133,15 @@ class outgoingSynSender(threading.Thread):
 sock.setproxy(
 proxytype, sockshostname, socksport, rdns)
 elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
-logger.info('(Using SOCKS5) Trying an outgiong connection to %s:%s.' % (HOST, PORT))
+logger.debug('(Using SOCKS5) Trying an outgoing connection to %s:%s' % (HOST, PORT))
 proxytype = socks.PROXY_TYPE_SOCKS5
 sockshostname = shared.config.get(
 'bitmessagesettings', 'sockshostname')
 socksport = shared.config.getint(
 'bitmessagesettings', 'socksport')
-rdns = True # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
+# Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be
+# doing any domain name lookups anyway.
+rdns = True
 if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
 socksusername = shared.config.get(
 'bitmessagesettings', 'socksusername')
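For context, both proxy branches above configure a SocksiPy socket before connecting out. A rough sketch of that flow, with placeholder values standing in for the shared.config lookups (assumes the bundled socks module):

    import socks  # SocksiPy, bundled with PyBitmessage

    sock = socks.socksocket()
    sock.settimeout(20)
    # rdns=True asks the proxy to resolve hostnames; this code only ever dials
    # IP addresses, so the setting is largely moot, as the comment above notes.
    sock.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9050, rdns=True)
    # With authentication enabled, username and password are passed as well:
    # sock.setproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9050, True, 'user', 'pass')
    sock.connect(('203.0.113.7', 8444))  # placeholder peer address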
@@ -156,9 +161,7 @@ class outgoingSynSender(threading.Thread):
 rd.setup(sock, HOST, PORT, self.streamNumber,
 objectsOfWhichThisRemoteNodeIsAlreadyAware)
 rd.start()
-shared.printLock.acquire()
-print self, 'connected to', HOST, 'during an outgoing attempt.'
-shared.printLock.release()
+logger.info('%s connected to %s during an outgoing attempt.' % (self, HOST))
 sd = sendDataThread()
 sd.setup(sock, HOST, PORT, self.streamNumber,
@@ -170,34 +173,39 @@ class outgoingSynSender(threading.Thread):
 logger.debug('Could NOT connect to %s during outgoing attempt. %s' % (HOST, str(err)))
 PORT, timeLastSeen = shared.knownNodes[
 self.streamNumber][HOST]
-if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the shared.knownNodes data-structure.
+# for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the
+# shared.knownNodes data-structure.
+if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000:
 shared.knownNodesLock.acquire()
 del shared.knownNodes[self.streamNumber][HOST]
 shared.knownNodesLock.release()
-logger.info('Deleting %s from shared.knownNodes because it is more that 48 hours old and we could not connect to it.' % HOST)
+logger.debug('deleting %s from shared.knownNodes because it is more than 48 hours old and we '
+'could not connect to it.' % (HOST))
 except socks.Socks5AuthError as err:
 shared.UISignalQueue.put((
 'updateStatusBar', translateText(
 "MainWindow", "SOCKS5 Authentication problem: %1").arg(str(err))))
 except socks.Socks5Error as err:
-logger.debug('SOCKS5 error (It is possible that the server wants authentication): %s' % str(err))
+logger.exception('SOCKS5 error. It is possible that the server wants authentication.')
 except socks.Socks4Error as err:
-logger.debug('SOCKS4 Error: %s' % str(err))
+logger.exception('Socks4Error')
 except socket.error as err:
 if shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
-logger.debug('Bitmessage MIGHT be having trouble connecting to the SOCKS server: %s' % str(err))
+logger.exception('Bitmessage MIGHT be having trouble connecting to the SOCKS server.')
 else:
-logger.debug('Could NOT connect to %s during outgoing attempt: %s' % (HOST, str(err)))
+logger.info('Could NOT connect to %s during outgoing attempt.', HOST, exc_info=True)
 PORT, timeLastSeen = shared.knownNodes[
 self.streamNumber][HOST]
-if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
+# for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the
+# knownNodes data-structure.
+if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000:
 shared.knownNodesLock.acquire()
 del shared.knownNodes[self.streamNumber][HOST]
 shared.knownNodesLock.release()
-logger.info('Deleting %s from knownNodes beause it is more than 48 hours old and we could not connect to it.' % HOST)
+logger.info('Deleting %s from knownNodes because it is more than 48 hours old and we '
+'could not connect to it.' % (HOST))
 except Exception as err:
-logger.debug(
-'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types: %s' % str(err))
+logger.exception('An uncaught exception has occurred in the outgoingSynSender thread.')
 time.sleep(0.1)
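The two failure branches above share the same pruning rule: a peer is removed from shared.knownNodes only when it was last seen more than 48 hours (172800 seconds) ago and the stream still lists more than 1000 hosts. A standalone sketch of that rule, assuming the same (port, timeLastSeen) tuple layout:

    import time

    def prune_stale_node(known_nodes, stream, host):
        # known_nodes[stream][host] == (port, timeLastSeen), mirroring shared.knownNodes.
        port, time_last_seen = known_nodes[stream][host]
        too_old = (int(time.time()) - time_last_seen) > 172800  # older than 48 hours
        enough_peers = len(known_nodes[stream]) > 1000          # keep a minimum pool of hosts
        if too_old and enough_peers:
            del known_nodes[stream][host]
            return True
        return False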
@@ -246,7 +254,7 @@ class singleListener(threading.Thread):
 # because the two computers will share the same external IP. This
 # is here to prevent connection flooding.
 while HOST in shared.connectedHostsList:
-logger.info('We are already connected to %s. Ignoring connection.' % HOST)
+logger.warning('We are already connected to %s. Ignoring connection.', HOST)
 a.close()
 a, (HOST, PORT) = sock.accept()
 objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
@@ -287,48 +295,63 @@ class receiveDataThread(threading.Thread):
 self.HOST = HOST
 self.PORT = port
 self.streamNumber = streamNumber
-self.payloadLength = 0 # This is the protocol payload length thus it doesn't include the 24 byte message header
+# This is the protocol payload length thus it does NOT include the 24 byte message header.
+self.payloadLength = 0
 self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
+# The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it
+# to this list so that an outgoingSynSender thread doesn't try to connect to it.
 shared.connectedHostsList[
-self.HOST] = 0 # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
-self.connectionIsOrWasFullyEstablished = False # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
-if self.streamNumber == -1: # This was an incoming connection. Send out a version message if we accept the other node's version message.
+self.HOST] = 0
+# set to true after the remote node and I accept each other's version messages. This is needed to allow the user
+# interface to accurately reflect the current number of connections.
+self.connectionIsOrWasFullyEstablished = False
+# This was an incoming connection. Send out a version message if we accept the other node's version message.
+if self.streamNumber == -1:
 self.initiatedConnection = False
 else:
 self.initiatedConnection = True
 selfInitiatedConnections[streamNumber][self] = 0
+# When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here
+# until we are done processing all other data received from this peer.
 self.ackDataThatWeHaveYetToSend = [
-] # When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
+]
 self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
 def run(self):
-logger.info('ID of the recieveDataThread is %s. The size of the shared.connectedHostsList is now %d' %
-(str(id(self)), len(shared.connectedHostsList)))
+logger.info(
+'ID of the receiveDataThread is %s. The size of the shared.connectedHostsList is now %s',
+str(id(self)), len(shared.connectedHostsList))
 while True:
 try:
 self.data += self.sock.recv(4096)
 except socket.timeout:
-logger.exception('Timeout occurred waiting for data from %s. Closing recieveData thread. (ID: %s)' % (self.HOST, str(id(self))))
+logger.info('Timeout occurred waiting for data from %s. Closing receiveData thread. (ID:%s)',
+self.HOST, id(self))
 break
 except Exception as err:
-logger.exception('sock.recv error. Closing recieveData thread (HOST: %s, ID: %s): %s' % (self.HOST, str(id(self)), str(err)))
+logger.exception('sock.recv error. Closing receiveData thread (HOST:%s, ID:%s).',
+self.HOST, id(self))
 break
 if self.data == "":
-logger.debug('Connection to %s closed. Closing recieveData thread. (ID: %s)' % (self.HOST, str(id(self))))
+logger.info('Connection to %s closed. Closing receiveData thread. (ID:%s)', self.HOST, id(self))
 break
 else:
 self.processData()
 try:
 del selfInitiatedConnections[self.streamNumber][self]
-logger.info('Removed a recieveDataThread (self) from selfInitatedConnections')
+logger.debug('Removed self (a receiveDataThread) from selfInitiatedConnections')
 except:
 pass
 shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
 try:
 del shared.connectedHostsList[self.HOST]
 except Exception as err:
-logger.debug('Could not delete %s from shared.connectionsHostsList. %s' % (self.HOST, str(err)))
+logger.exception('Could not delete %s from shared.connectedHostsList.', self.HOST)
 try:
 del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[
 self.HOST]
@@ -338,36 +361,41 @@ class receiveDataThread(threading.Thread):
 logger.info('The size of the connectedHosts list is now: %d' % len(shared.connectedHostsList))
 def processData(self):
-global verbose
-# if verbose >= 3:
-# shared.printLock.acquire()
-# print 'self.data is currently ', repr(self.data)
-# shared.printLock.release()
 if len(self.data) < 20: # if so little of the data has arrived that we can't even unpack the payload length
 return
 if self.data[0:4] != '\xe9\xbe\xb4\xd9':
-logger.info('The magic bytes were not correct. First 40 bytes of data: %s' % str(repr(self.data[0:40])))
+logger.debug('The magic bytes were not correct. First 40 bytes of data: ' + repr(self.data[0:40]))
 self.data = ""
 return
 self.payloadLength, = unpack('>L', self.data[16:20])
 if len(self.data) < self.payloadLength + 24: # check if the whole message has arrived yet.
 return
-if self.data[20:24] != hashlib.sha512(self.data[24:self.payloadLength + 24]).digest()[0:4]: # test the checksum in the message. If it is correct...
-logger.info('Checksum incorrect. Clearning this message.')
+# test the checksum in the message. If it is correct...
+if self.data[20:24] != hashlib.sha512(self.data[24:self.payloadLength + 24]).digest()[0:4]:
+logger.info('Checksum incorrect. Clearing this message.')
 self.data = self.data[self.payloadLength + 24:]
 self.processData()
 return
 # The time we've last seen this node is obviously right now since we
 # just received valid data from it. So update the knownNodes list so
 # that other peers can be made aware of its existance.
-if self.initiatedConnection and self.connectionIsOrWasFullyEstablished: # The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
+#
+# The remote port is only something we should share with others if it is the remote node's incoming port (rather
+# than some random operating-system-assigned outgoing port).
+if self.initiatedConnection and self.connectionIsOrWasFullyEstablished:
 shared.knownNodesLock.acquire()
 shared.knownNodes[self.streamNumber][
 self.HOST] = (self.PORT, int(time.time()))
 shared.knownNodesLock.release()
-if self.payloadLength <= 180000000: # If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this though it is concievable that this value will have to be lowered if some systems are less tolarant of large messages.)
+# If the size of the message is greater than 180MB, ignore it. (I get
+# memory errors when processing messages much larger than this though
+# it is conceivable that this value will have to be lowered if some
+# systems are less tolerant of large messages.)
+if self.payloadLength <= 180000000:
 remoteCommand = self.data[4:16]
-logger.info('remoteCommand %s from %s' % (repr(remoteCommand.replace('\x00', '')), self.HOST))
+logger.info('remoteCommand %s from %s', remoteCommand.replace('\x00', ''), self.HOST)
 if remoteCommand == 'version\x00\x00\x00\x00\x00':
 self.recversion(self.data[24:self.payloadLength + 24])
 elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
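The checks above walk a 24-byte message header: 4 magic bytes, a 12-byte null-padded command, a big-endian payload length, and the first 4 bytes of the payload's SHA-512 digest as a checksum. A compact sketch of the same validation on a standalone buffer (Python 2 byte strings, as in the file itself):

    import hashlib
    from struct import unpack

    MAGIC = '\xe9\xbe\xb4\xd9'

    def parse_message(buf):
        """Return (command, payload), or None if buf is incomplete or invalid."""
        if len(buf) < 24 or buf[0:4] != MAGIC:
            return None
        command = buf[4:16].replace('\x00', '')
        payloadLength, = unpack('>L', buf[16:20])
        if len(buf) < payloadLength + 24:
            return None  # the whole message has not arrived yet
        payload = buf[24:payloadLength + 24]
        if buf[20:24] != hashlib.sha512(payload).digest()[0:4]:
            return None  # checksum mismatch; the caller should skip this message
        return command, payload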
@@ -401,19 +429,24 @@ class receiveDataThread(threading.Thread):
 objectHash, = random.sample(
 self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave, 1)
 if objectHash in shared.inventory:
-logger.info('Inventory (in memory) already has object listen in inv message.')
+logger.debug('Inventory (in memory) already has object listed in inv message.')
 del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[
 objectHash]
 elif isInSqlInventory(objectHash):
-logger.info('Inventory (SQL on disk) alredy has object listen in inv message.')
+if verbose >= 3:
+logger.debug('Inventory (SQL on disk) already has object listed in inv message.')
 del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[
 objectHash]
 else:
 self.sendgetdata(objectHash)
+# It is possible that the remote node doesn't respond with the object. In that
+# case, we'll very likely get it from someone else anyway.
 del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[
-objectHash] # It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
+objectHash]
 if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) == 0:
-logger.info('(concerning %s) Number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now: 0' % self.HOST)
+logger.debug('(concerning %s) number of '
+'objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now %s',
+self.HOST, len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave))
 try:
 del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[
 self.HOST] # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
@@ -421,15 +454,18 @@ class receiveDataThread(threading.Thread):
 pass
 break
 if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) == 0:
-logger.info('(concerning %s) number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now 0' % self.HOST)
+logger.debug('(concerning %s) number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now %s',
+self.HOST,
+len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave))
 try:
 del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[
 self.HOST] # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
 except:
 pass
 if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
-logger.info('(concerning %s ) number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now %d' %
-(self.HOST, len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)))
+logger.debug('(concerning %s) number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now %s',
+self.HOST,
+len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave))
 numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST] = len(
 self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
 if len(self.ackDataThatWeHaveYetToSend) > 0:
@@ -447,20 +483,18 @@ class receiveDataThread(threading.Thread):
 payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
 POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
 :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
-# print 'POW:', POW
 return POW <= 2 ** 64 / ((len(data) + payloadLengthExtraBytes) * (nonceTrialsPerByte))
 def sendpong(self):
-print 'Sending pong'
+logger.debug('Sending pong.')
 try:
 self.sock.sendall(
 '\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
 except Exception as err:
-# if not 'Bad file descriptor' in err:
-logger.exception(err)
+logger.exception('Caught exception sending pong.')
 def recverack(self):
-print 'verack received'
+logger.debug('verack received')
 self.verackReceived = True
 if self.verackSent:
 # We have thus both sent and received a verack.
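isProofOfWorkSufficient, shown in part above, reads the first 8 bytes of the object as the nonce and requires that a double SHA-512 over nonce + initial hash come in under a target derived from the object length. The same check as a standalone sketch, with the two difficulty parameters passed in explicitly rather than read from shared defaults:

    import hashlib
    from struct import unpack

    def pow_is_sufficient(data, nonceTrialsPerByte, payloadLengthExtraBytes):
        # data[:8] is the nonce; the remainder is hashed once to get the initial
        # hash, then the pair is double-SHA-512 hashed and the leading 8 bytes
        # are read as a big-endian integer.
        initialHash = hashlib.sha512(data[8:]).digest()
        POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(
            data[:8] + initialHash).digest()).digest()[0:8])
        target = 2 ** 64 / ((len(data) + payloadLengthExtraBytes) * nonceTrialsPerByte)
        return POW <= target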
@@ -475,14 +509,15 @@ class receiveDataThread(threading.Thread):
 shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
 remoteNodeIncomingPort, remoteNodeSeenTime = shared.knownNodes[
 self.streamNumber][self.HOST]
-logger.info('Connection fully established with %s:%d' % (self.HOST, remoteNodeIncomingPort))
-logger.info('The size of the connectedHostsList is now %d' % len(shared.connectedHostsList))
-logger.info('Broadcasting addr from within connectionFullyEstablished()')
+logger.info('Connection fully established with %s:%s', self.HOST, remoteNodeIncomingPort)
+logger.info('The size of the connectedHostsList is now: %s', len(shared.connectedHostsList))
+logger.info('The length of sendDataQueues is now: %s', len(shared.sendDataQueues))
+logger.info('broadcasting addr from within connectionFullyEstablished function.')
 self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST,
 remoteNodeIncomingPort)]) # This lets all of our peers know about this new node.
 self.sendaddr() # This is one large addr message to this one peer.
 if not self.initiatedConnection and len(shared.connectedHostsList) > 200:
-logger.info('We are connected to too many people. Closing connection.')
+logger.warning('We are connected to too many people. Closing connection.')
 shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
 return
 self.sendBigInv()
@@ -536,12 +571,11 @@ class receiveDataThread(threading.Thread):
 headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
 headerData += pack('>L', len(payload))
 headerData += hashlib.sha512(payload).digest()[:4]
-logger.info('Sending huge inv message with %d objects to just this one peer.' % numberOfObjects)
+logger.info('Sending huge inv message with %s objects to just this one peer.', numberOfObjects)
 try:
 self.sock.sendall(headerData + payload)
 except Exception as err:
-# if not 'Bad file descriptor' in err:
-logger.exception('sock.sendall error: %s' % str(err))
+logger.exception('Error sending inv.')
 # We have received a broadcast message
 def recbroadcast(self, data):
@@ -579,18 +613,20 @@ class receiveDataThread(threading.Thread):
 streamNumber, streamNumberLength = decodeVarint(data[
 readPosition + broadcastVersionLength:readPosition + broadcastVersionLength + 10])
 if streamNumber != self.streamNumber:
-logger.info("The stream number encoded in this broadcast message (%d) doesn't match the stream number it was recieved on. Ignoring it." %
-streamNumber)
+logger.info('The stream number encoded in this broadcast message (%s) does not '
+'match the stream number on which it was received. Ignoring it.',
+streamNumber)
 return
 shared.inventoryLock.acquire()
 self.inventoryHash = calculateInventoryHash(data)
 if self.inventoryHash in shared.inventory:
-logger.info('We have already recieved this broadcast object. Ignoring.')
+logger.info('We have already received this broadcast object. Ignoring.')
 shared.inventoryLock.release()
 return
 elif isInSqlInventory(self.inventoryHash):
-logger.info('We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.')
+logger.info('We have already received this broadcast object '
+'(it is stored on disk in the SQL inventory). Ignoring it.')
 shared.inventoryLock.release()
 return
 # It is valid so far. Let's let our peers know about it.
@@ -724,7 +760,7 @@ class receiveDataThread(threading.Thread):
 fromAddress = encodeAddress(
 sendersAddressVersion, sendersStream, ripe.digest())
-logger.info('fromAddress: %s' str(fromAddress))
+logger.info('fromAddress: %s', str(fromAddress))
 if messageEncodingType == 2:
 bodyPositionIndex = string.find(message, '\nBody:')
 if bodyPositionIndex > 1:
@@ -737,7 +773,7 @@ class receiveDataThread(threading.Thread):
 body = message
 subject = ''
 elif messageEncodingType == 0:
-logger.info('messageEncodingType == 0. Doing nothing with the message.')
+logger.debug('messageEncodingType == 0. Doing nothing with the message.')
 else:
 body = 'Unknown encoding type.\n\n' + repr(message)
 subject = ''
@@ -765,24 +801,33 @@ class receiveDataThread(threading.Thread):
 call([apiNotifyPath, "newBroadcast"])
 # Display timing data
-logger.info('Time spent processing this interesting broadcast: %d' % time.time() - self.messageProcessingStartTime)
+logger.info('Time spent processing this interesting broadcast: %s',
+time.time() - self.messageProcessingStartTime)
 if broadcastVersion == 2:
 cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
 data[readPosition:readPosition + 10])
 readPosition += cleartextStreamNumberLength
 initialDecryptionSuccessful = False
 for key, cryptorObject in shared.MyECSubscriptionCryptorObjects.items():
+# TODO(grobinson): Once successful, shouldn't keep trying.
 try:
 decryptedData = cryptorObject.decrypt(data[readPosition:])
-toRipe = key # This is the RIPE hash of the sender's pubkey. We need this below to compare to the RIPE hash of the sender's address to verify that it was encrypted by with their key rather than some other key.
+# This is the RIPE hash of the sender's pubkey. We need this below to compare to
+# the RIPE hash of the sender's address to verify that it was encrypted with
+# their key rather than some other key.
+toRipe = key
 initialDecryptionSuccessful = True
-logger.info('EC decryption successful using key associated with ripe hash: %d' % key.encode('hex'))
+logger.info('EC decryption successful using key associated with ripe hash: %s',
+key.encode('hex'))
 break
 except Exception as err:
-logger.exception('cryptorObject.decrypt Exception: %s' % str(err))
+# TODO(grobinson): Avoid using exceptions for normal
+# application logic.
+pass
 if not initialDecryptionSuccessful:
 # This is not a broadcast I am interested in.
-logger.info('Length of time program spent failinf to decrypt this v2 broadcast %ds' % time.time() - self.messageProcessingStartTime)
+logger.info('Time spent failing to decrypt this v2 broadcast: %s seconds.',
+time.time() - self.messageProcessingStartTime)
 return
 # At this point this is a broadcast I have decrypted and thus am
 # interested in.
@@ -792,13 +837,17 @@ class receiveDataThread(threading.Thread):
 sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
 decryptedData[readPosition:readPosition + 9])
 if sendersAddressVersion < 2 or sendersAddressVersion > 3:
-print 'Cannot decode senderAddressVersion other than 2 or 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.'
+logger.error('Cannot decode senderAddressVersion other than 2 or 3. Assuming the '
+'sender isn\'t being silly, you should upgrade Bitmessage because '
+'this message shall be ignored.')
 return
 readPosition += sendersAddressVersionLength
 sendersStream, sendersStreamLength = decodeVarint(
 decryptedData[readPosition:readPosition + 9])
 if sendersStream != cleartextStreamNumber:
-print 'The stream number outside of the encryption on which the POW was completed doesn\'t match the stream number inside the encryption. Ignoring broadcast.'
+logger.info('The stream number outside of the encryption on which the POW was '
+'completed doesn\'t match the stream number inside the encryption. '
+'Ignoring broadcast.')
 return
 readPosition += sendersStreamLength
 behaviorBitfield = decryptedData[readPosition:readPosition + 4]
@@ -813,11 +862,13 @@ class receiveDataThread(threading.Thread):
 requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = decodeVarint(
 decryptedData[readPosition:readPosition + 10])
 readPosition += varintLength
-print 'sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is', requiredAverageProofOfWorkNonceTrialsPerByte
+logger.debug('Sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is %s',
+requiredAverageProofOfWorkNonceTrialsPerByte)
 requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
 decryptedData[readPosition:readPosition + 10])
 readPosition += varintLength
-print 'sender\'s requiredPayloadLengthExtraBytes is', requiredPayloadLengthExtraBytes
+logger.debug('sender\'s requiredPayloadLengthExtraBytes is %s',
+requiredPayloadLengthExtraBytes)
 endOfPubkeyPosition = readPosition
 sha = hashlib.new('sha512')
@@ -826,7 +877,8 @@ class receiveDataThread(threading.Thread):
 ripe.update(sha.digest())
 if toRipe != ripe.digest():
-print 'The encryption key used to encrypt this message doesn\'t match the keys inbedded in the message itself. Ignoring message.'
+logger.warning('The encryption key used to encrypt this message doesn\'t match '
+'the keys embedded in the message itself. Ignoring message.')
 return
 messageEncodingType, messageEncodingTypeLength = decodeVarint(
 decryptedData[readPosition:readPosition + 9])
@@ -846,11 +898,11 @@ class receiveDataThread(threading.Thread):
 readPosition:readPosition + signatureLength]
 try:
 if not highlevelcrypto.verify(decryptedData[:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
-print 'ECDSA verify failed'
+logger.warning('ECDSA verify failed.')
 return
-print 'ECDSA verify passed'
+logger.debug('ECDSA verify passed.')
 except Exception as err:
-print 'ECDSA verify failed', err
+logger.exception('ECDSA verify failed')
 return
 # verify passed
@@ -873,9 +925,7 @@ class receiveDataThread(threading.Thread):
 fromAddress = encodeAddress(
 sendersAddressVersion, sendersStream, ripe.digest())
-shared.printLock.acquire()
-print 'fromAddress:', fromAddress
-shared.printLock.release()
+logger.info('fromAddress: %s.', fromAddress)
 if messageEncodingType == 2:
 bodyPositionIndex = string.find(message, '\nBody:')
 if bodyPositionIndex > 1:
@@ -888,7 +938,7 @@ class receiveDataThread(threading.Thread):
 body = message
 subject = ''
 elif messageEncodingType == 0:
-print 'messageEncodingType == 0. Doing nothing with the message.'
+logger.info('messageEncodingType == 0. Doing nothing with the message.')
 else:
 body = 'Unknown encoding type.\n\n' + repr(message)
 subject = ''
@@ -916,16 +966,15 @@ class receiveDataThread(threading.Thread):
 call([apiNotifyPath, "newBroadcast"])
 # Display timing data
-shared.printLock.acquire()
-print 'Time spent processing this interesting broadcast:', time.time() - self.messageProcessingStartTime
-shared.printLock.release()
+logger.info('Time spent processing this interesting broadcast: %s',
+time.time() - self.messageProcessingStartTime)
 # We have received a msg message.
 def recmsg(self, data):
 self.messageProcessingStartTime = time.time()
 # First we must check to make sure the proof of work is sufficient.
 if not self.isProofOfWorkSufficient(data):
-print 'Proof of work in msg message insufficient.'
+logger.info('Proof of work in msg message insufficient.')
 return
 readPosition = 8
@@ -940,10 +989,10 @@ class receiveDataThread(threading.Thread):
 readPosition += 4
 if embeddedTime > int(time.time()) + 10800:
-print 'The time in the msg message is too new. Ignoring it. Time:', embeddedTime
+logger.warning('The time in the msg message is too new. Ignoring it. Time: %s', embeddedTime)
 return
 if embeddedTime < int(time.time()) - maximumAgeOfAnObjectThatIAmWillingToAccept:
-print 'The time in the msg message is too old. Ignoring it. Time:', embeddedTime
+logger.warning('The time in the msg message is too old. Ignoring it. Time: %s', embeddedTime)
 return
 streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(
 data[readPosition:readPosition + 9])