# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Right now, PyBitmessage only supports connecting to stream 1. It doesn't
# yet contain logic to expand into further streams.
# The software version variable is now held in shared.py
verbose = 1
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000  # Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600  # Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200  # Equals 4 weeks. You could make this longer if you want, but making it shorter is not advisable: there is a small chance it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000  # Equals two days and 12 hours.
maximumAgeOfNodesThatIAdvertiseToOthers = 10800  # Equals three hours.
storeConfigFilesInSameDirectoryAsProgramByDefault = False  # The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.
useVeryEasyProofOfWorkForTesting = False  # If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
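# For reference, a sketch of how the second counts above can be derived (these
# expressions are illustrative only and are not used anywhere in the code):
# assert maximumAgeOfAnObjectThatIAmWillingToAccept == int(2.5 * 24 * 60 * 60)   # 2 days, 12 hours
# assert lengthOfTimeToLeaveObjectsInInventory == int(2.75 * 24 * 60 * 60)       # 2 days, 18 hours
# assert lengthOfTimeToHoldOnToAllPubkeys == 4 * 7 * 24 * 60 * 60                # 4 weeks
# assert maximumAgeOfNodesThatIAdvertiseToOthers == 3 * 60 * 60                  # 3 hours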
encryptedBroadcastSwitchoverTime = 1369735200
import sys
@@ -33,174 +34,213 @@ import random
import sqlite3
import threading
from time import strftime, localtime, gmtime
import shutil  # used for moving the messages.dat file
import string
import socks
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
import signal  # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.
# The next 3 are used for the API
from SimpleXMLRPCServer import *
import json
from subprocess import call  # used when the API must execute an outside program
import singleton
import proofofwork
# For each stream to which we connect, several outgoingSynSender threads
# will exist and will collectively create 8 connections with peers.
class outgoingSynSender(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

    def setup(self, streamNumber):
        self.streamNumber = streamNumber

    def run(self):
        time.sleep(1)
        global alreadyAttemptedConnectionsListResetTime
        while True:
            if len(selfInitiatedConnections[self.streamNumber]) >= 8:  # maximum number of outgoing connections = 8
                # Clear out the alreadyAttemptedConnectionsList every half
                # hour so that this program will again attempt a connection
                # to any nodes, even ones it has already tried.
            PORT, timeLastSeen = shared.knownNodes[
                self.streamNumber][HOST]
            if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000:  # If this node is more than 48 hours old and we have more than 1000 hosts in our list, delete it from the shared.knownNodes data structure.
                shared.knownNodesLock.acquire()
                del shared.knownNodes[self.streamNumber][HOST]
                shared.knownNodesLock.release()
                shared.printLock.acquire()
                print 'deleting ', HOST, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
                shared.printLock.release()
            except Exception as err:
                sys.stderr.write(
                    'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types: %s\n' % err)
            time.sleep(0.1)
# Only one singleListener thread will ever exist. It creates the
# receiveDataThread and sendDataThread for each incoming connection. Note
# that it cannot set the stream number because it is not known yet; the
# other node will have to tell us its stream number in a version message.
# If we don't care about their stream, we will close the connection
# (within the recversion function of the receiveData thread)
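# A rough sketch of the accept-and-spawn pattern described above. The
# receiveDataThread class and the "-1 until we learn the stream" convention
# appear elsewhere in this file, but the setup() arguments shown here are
# assumptions rather than the real signature:
# a, (HOST, PORT) = sock.accept()
# rd = receiveDataThread()
# rd.setup(a, HOST, PORT, -1)  # -1: stream unknown until the peer's version message arrives
# rd.start()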
class singleListener(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # We don't want to accept incoming connections if the user is using
        # a SOCKS proxy. If the user eventually selects proxy 'none' then
        # this will start listening for connections.
@@ -208,85 +248,99 @@ class singleListener(threading.Thread):
            print 'We are connected to too many people. Not accepting further incoming connections for ten seconds.'
            shared.printLock.release()
            time.sleep(10)
        a, (HOST, PORT) = sock.accept()
        # The following code will, unfortunately, block an incoming
        # connection if someone else on the same LAN is already connected
        # because the two computers will share the same external IP. This
        # is here to prevent connection flooding.
        while HOST in shared.connectedHostsList:
            shared.printLock.acquire()
            print 'We are already connected to', HOST + '. Ignoring connection.'
        shared.connectedHostsList[self.HOST] = 0  # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
        self.connectionIsOrWasFullyEstablished = False  # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
        if self.streamNumber == -1:  # This was an incoming connection. Send out a version message if we accept the other node's version message.
            self.initiatedConnection = False
        else:
            self.initiatedConnection = True
            selfInitiatedConnections[streamNumber][self] = 0
        self.ackDataThatWeHaveYetToSend = []  # When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
        print 'The size of the connectedHostsList is now:', len(shared.connectedHostsList)
        shared.printLock.release()
    def processData(self):
        global verbose
        # if verbose >= 3:
        #     shared.printLock.acquire()
        #     print 'self.data is currently ', repr(self.data)
        #     shared.printLock.release()
        if len(self.data) < 20:  # if so little of the data has arrived that we can't even unpack the payload length
            return
        if self.data[0:4] != '\xe9\xbe\xb4\xd9':
            if verbose >= 1:
                shared.printLock.acquire()
                print 'The magic bytes were not correct. First 40 bytes of data: ' + repr(self.data[0:40])
                shared.printLock.release()
            self.data = ""
            return
        self.payloadLength, = unpack('>L', self.data[16:20])
        if len(self.data) < self.payloadLength + 24:  # check if the whole message has arrived yet
            return
        if self.data[20:24] != hashlib.sha512(self.data[24:self.payloadLength + 24]).digest()[0:4]:  # test the checksum in the message. If it is incorrect, drop this message and move on to the next one in the buffer.
            print 'Checksum incorrect. Clearing this message.'
            self.data = self.data[self.payloadLength + 24:]
            self.processData()
            return
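        # For reference, a sketch of how the 24-byte header validated above is
        # laid out: 4 magic bytes, a 12-byte null-padded command, a 4-byte
        # big-endian payload length, and the first 4 bytes of the payload's
        # SHA-512 digest as a checksum. The helper name is hypothetical and
        # nothing below is used by this file:
        # def packMessageHeaderSketch(command, payload):
        #     checksum = hashlib.sha512(payload).digest()[:4]
        #     return '\xe9\xbe\xb4\xd9' + command.ljust(12, '\x00') + \
        #         pack('>L', len(payload)) + checksum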
        # The time we've last seen this node is obviously right now since we
        # just received valid data from it. So update the knownNodes list so
        # that other peers can be made aware of its existence.
        if self.initiatedConnection and self.connectionIsOrWasFullyEstablished:  # The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
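            # The elided body of this branch presumably refreshes the entry in
            # the same (PORT, timeLastSeen) form used elsewhere in this file; a
            # sketch, where self.PORT standing for the peer's incoming port is
            # an assumption:
            # shared.knownNodesLock.acquire()
            # shared.knownNodes[self.streamNumber][self.HOST] = (self.PORT, int(time.time()))
            # shared.knownNodesLock.release()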
        if self.payloadLength <= 180000000:  # If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this, though it is conceivable that this value will have to be lowered if some systems are less tolerant of large messages.)
                del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]  # It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                shared.printLock.release()
                try:
                    del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[
                        self.HOST]  # This data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
                except:
                    pass
                break
@@ -401,35 +465,41 @@ class receiveDataThread(threading.Thread):
                print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                shared.printLock.release()
                try:
                    del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[
                        self.HOST]  # This data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
                print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                shared.printLock.release()
                numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST] = len(
                    self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)  # This data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
        self.sock.settimeout(600)  # We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
        # Select all hashes which are younger than two days old and in this
        # stream (pubkeys are kept longer, per the constants at the top of
        # this file).
        shared.sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''')
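        # A sketch of the parameters that would be bound to the three
        # placeholders above, based on the constants defined at the top of
        # this file (the exact expressions are assumptions):
        # t = (int(time.time()) - maximumAgeOfObjectsThatIAdvertiseToOthers,
        #      int(time.time()) - lengthOfTimeToHoldOnToAllPubkeys,
        #      self.streamNumber)
        # shared.sqlSubmitQueue.put(t)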
        # Now let us start appending all of these hashes together. They will be
        # sent out in a big inv message to our new peer.
        for hash, storedValue in bigInvList.items():
            payload += hash
            numberOfObjectsInInvMessage += 1
            if numberOfObjectsInInvMessage >= 50000:  # We can only send a max of 50000 items per inv message, but we may have more objects to advertise. They must be split up into multiple inv messages.
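            # In other words, the accumulated hashes go out in chunks of at
            # most 50000; a sketch of that rule (allHashes and sendChunk are
            # illustrative names, not variables used in this function):
            # for i in range(0, len(allHashes), 50000):
            #     sendChunk(''.join(allHashes[i:i + 50000]))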
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if len(data) < 180:
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        # Let us check to make sure the stream number is correct (thus
        # preventing an individual from sending broadcasts out on the wrong
        # streams or all streams).
            print 'The stream number encoded in this broadcast message (' + str(streamNumber) + ') does not match the stream number on which it was received. Ignoring it.'
            return
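        # The comparison elided just above the mismatch message presumably
        # resembles this sketch (decodeVarint is PyBitmessage's varint helper;
        # the readPosition arithmetic is an assumption):
        # streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
        # if streamNumber != self.streamNumber:
        #     ... print the message above and return ...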
@@ -558,121 +644,150 @@ class receiveDataThread(threading.Thread):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            shared.inventoryLock.release()
            return
        # It is valid so far. Let's let our peers know about it.
        self.processbroadcast(readPosition, data)  # When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found a problem with the broadcast that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we
        # haven't used the specified amount of time, we shall sleep. These
        # values are mostly the same values used for msg messages, although
        # broadcast messages are processed faster.
        if len(data) > 100000000:  # Size is greater than 100 megabytes
            print 'Cannot decode incoming broadcast versions higher than 2. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.'
            return
        if broadcastVersion == 1:
            beginningOfPubkeyPosition = readPosition  # used when we add the pubkey to our pubkey table
            # Cannot decode senderAddressVersion higher than 2. Assuming the
            # sender isn't being silly, you should upgrade Bitmessage because
            # this message shall be ignored.
            print 'Time spent deciding that we are not interested in this v1 broadcast:', time.time() - self.messageProcessingStartTime
            shared.printLock.release()
            return
            # At this point, this message claims to be from sendersHash and
            # we are interested in it. We still have to hash the public key
            # to make sure it is truly the key that matches the hash, and
            # also check the signature.
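            # A sketch of that hash check, assuming the usual Bitmessage
            # convention of RIPEMD-160 over SHA-512 of the concatenated public
            # signing and encryption keys (variable names are illustrative):
            # sha = hashlib.new('sha512')
            # sha.update(pubSigningKey + pubEncryptionKey)
            # ripe = hashlib.new('ripemd160')
            # ripe.update(sha.digest())
            # weAreInterested = (ripe.digest() == sendersHash)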
            # Let's store the public key in case we want to reply to this
            # person.
            # We don't have the correct nonce or time (which would let us send
            # out a pubkey message) so we'll just fill it with 1's. We won't be
            # able to send this pubkey to others (without doing the proof of
            # work ourselves, which this program is programmed not to do).
            shared.sqlSubmitQueue.put(
                '''INSERT INTO pubkeys VALUES (?,?,?,?)''')
            shared.sqlSubmitQueue.put(t)
            shared.sqlReturnQueue.get()
            shared.sqlSubmitQueue.put('commit')
            shared.sqlLock.release()
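            # For reference, the general pattern this file uses to talk to the
            # SQL thread is sketched below (the sqlLock.acquire() that pairs
            # with the release above falls outside this excerpt):
            # shared.sqlLock.acquire()
            # shared.sqlSubmitQueue.put(query)       # the SQL text
            # shared.sqlSubmitQueue.put(parameters)  # the bound values, as a tuple
            # queryreturn = shared.sqlReturnQueue.get()
            # shared.sqlSubmitQueue.put('commit')
            # shared.sqlLock.release()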
            # shared.workerQueue.put(('newpubkey', (sendersAddressVersion, sendersStream, ripe.digest())))  # This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
                toRipe = key  # This is the RIPE hash of the sender's pubkey. We need this below to compare to the RIPE hash of the sender's address to verify that it was encrypted with their key rather than some other key.
                initialDecryptionSuccessful = True
                print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
                break
            except Exception as err:
                pass
                # print 'cryptorObject.decrypt Exception:', err
        if not initialDecryptionSuccessful:
            # This is not a broadcast I am interested in.
            shared.printLock.acquire()
            print 'Length of time program spent failing to decrypt this v2 broadcast:', time.time() - self.messageProcessingStartTime, 'seconds.'
            shared.printLock.release()
            return
        # At this point this is a broadcast I have decrypted and thus am
        # interested in.
            print 'Cannot decode senderAddressVersion other than 2 or 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.'
            print 'The stream number outside of the encryption on which the POW was completed doesn\'t match the stream number inside the encryption. Ignoring broadcast.'
            shared.sqlSubmitQueue.put(
                '''INSERT INTO pubkeys VALUES (?,?,?,?)''')
            shared.sqlSubmitQueue.put(t)
            shared.sqlReturnQueue.get()
            shared.sqlSubmitQueue.put('commit')
            shared.sqlLock.release()
            # shared.workerQueue.put(('newpubkey', (sendersAddressVersion, sendersStream, ripe.digest())))  # This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
            print 'The stream number encoded in this msg message (' + str(streamNumberAsClaimedByMsg) + ') does not match the stream number on which it was received. Ignoring it.'
            return
@@ -884,88 +1032,99 @@ class receiveDataThread(threading.Thread):
            print 'We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.'
            shared.inventoryLock.release()
            return
        # This msg message is valid. Let's let our peers know about it.
        self.processmsg(readPosition, data)  # When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found a problem with the message that warranted ignoring it.
        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we
        # haven't used the specified amount of time, we shall sleep. These
        # values are based on test timings and you may change them at will.
        if len(data) > 100000000:  # Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100  # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
        elif len(data) > 10000000:  # Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20  # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
        elif len(data) > 1000000:  # Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3  # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
        else:  # Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .6  # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
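        # Sketch of the throttle the comment above describes: after processing,
        # sleep for whatever is left of the allotted time. Only
        # lengthOfTimeWeShouldUseToProcessThisMessage and
        # self.messageProcessingStartTime come from this file; the rest is
        # illustrative:
        # timeWeAlreadySpent = time.time() - self.messageProcessingStartTime
        # sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - timeWeAlreadySpent
        # if sleepTime > 0:
        #     time.sleep(sleepTime)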