from __future__ import division

verbose = 1
# This is obsolete with the change to protocol v3 but the singleCleaner
# thread still hasn't been updated so we need this a little longer.
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000
# Equals 4 weeks. You could make this longer if you want but making it
# shorter would not be advisable because there is a very small possibility
# that it could keep you from obtaining a needed pubkey for a period of time.
lengthOfTimeToHoldOnToAllPubkeys = 2419200
maximumAgeOfNodesThatIAdvertiseToOthers = 10800  # Equals three hours
# If you set this to True while on the normal network, you won't be able
# to send or sometimes receive messages.
useVeryEasyProofOfWorkForTesting = False
# Libraries.
import os
import stat
import sys
import threading
import time
import traceback
from binascii import hexlify

# Project imports.
from addresses import *
from bmconfigparser import BMConfigParser
#import helper_startup
from helper_sql import *
import highlevelcrypto
from inventory import Inventory
import protocol
from queues import objectProcessorQueue
import state
# --- Mutable module-level state shared across threads. ---

myECCryptorObjects = {}
MyECSubscriptionCryptorObjects = {}
# The key in this dictionary is the RIPE hash which is encoded in an address
# and value is the address itself.
myAddressesByHash = {}
# The key in this dictionary is the tag generated from the address.
myAddressesByTag = {}
broadcastSendersForWhichImWatching = {}
printLock = threading.Lock()
statusIconColor = 'red'
# List of hosts to which we are connected. Used to guarantee that the
# outgoingSynSender threads won't connect to the same remote node twice.
connectedHostsList = {}
thisapp = None  # singleton lock instance
# This is a list of nodes to which we have already attempted a connection
alreadyAttemptedConnectionsList = {}
alreadyAttemptedConnectionsListLock = threading.Lock()
# used to clear out the alreadyAttemptedConnectionsList periodically so that
# we will retry connecting to hosts to which we have already tried to connect.
alreadyAttemptedConnectionsListResetTime = int(time.time())
# A list of the amounts of time it took to successfully decrypt msg messages
successfullyDecryptMessageTimings = []
ackdataForWhichImWatching = {}
clientHasReceivedIncomingConnections = False  # used by API command clientStatus
numberOfMessagesProcessed = 0
numberOfBroadcastsProcessed = 0
numberOfPubkeysProcessed = 0
# If True, the singleCleaner will write it to disk eventually.
needToWriteKnownNodesToDisk = False
maximumLengthOfTimeToBotherResendingMessages = 0
timeOffsetWrongCount = 0
def isAddressInMyAddressBook(address):
    """Return True if *address* appears in the addressbook table."""
    queryreturn = sqlQuery(
        '''select address from addressbook where address=?''',
        address)
    return queryreturn != []
#At this point we should really just have a isAddressInMy(book, address)...
def isAddressInMySubscriptionsList(address):
    """Return True if *address* appears in the subscriptions table."""
    queryreturn = sqlQuery(
        '''select * from subscriptions where address=?''',
        str(address))
    return queryreturn != []
2013-05-02 17:53:54 +02:00
def isAddressInMyAddressBookSubscriptionsListOrWhitelist ( address ) :
if isAddressInMyAddressBook ( address ) :
return True
2013-08-29 14:03:45 +02:00
queryreturn = sqlQuery ( ''' SELECT address FROM whitelist where address=? and enabled = ' 1 ' ''' , address )
2013-05-02 17:53:54 +02:00
if queryreturn < > [ ] :
return True
2013-08-29 16:00:27 +02:00
queryreturn = sqlQuery (
2013-08-29 14:03:45 +02:00
''' select address from subscriptions where address=? and enabled = ' 1 ' ''' ,
address )
2013-05-02 17:53:54 +02:00
if queryreturn < > [ ] :
return True
return False
def decodeWalletImportFormat(WIFstring):
    """
    Convert a private key from Wallet Import Format (base58check) back to
    its raw byte form.

    On a bad checksum, or a key that doesn't begin with the 0x80 prefix
    byte, this logs a critical error and calls os._exit(0) — i.e. it kills
    the whole process rather than raising. The `return ""` lines after
    os._exit are unreachable in practice and kept for good measure.
    """
    fullString = arithmetic.changebase(WIFstring, 58, 256)
    privkey = fullString[:-4]
    # Last 4 bytes are a double-SHA256 checksum over the rest.
    if fullString[-4:] != hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        logger.critical('Major problem! When trying to decode one of your private keys, the checksum '
                        'failed. Here are the first 6 characters of the PRIVATE key: %s' % str(WIFstring)[:6])
        os._exit(0)
        return ""
    else:
        # checksum passed
        if privkey[0] == '\x80':
            # Strip the 0x80 WIF version prefix.
            return privkey[1:]
        else:
            logger.critical('Major problem! When trying to decode one of your private keys, the '
                            'checksum passed but the key doesn\'t begin with hex 80. Here is the '
                            'PRIVATE key: %s' % str(WIFstring))
            os._exit(0)
            return ""
def reloadMyAddressHashes():
    """Reload cryptor objects and address lookup tables from keys.dat."""
    logger.debug('reloading keys from keys.dat file')
    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    # myPrivateKeys.clear()

    keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat')
    hasEnabledKeys = False
    for addressInKeysFile in BMConfigParser().addresses():
        isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
        if isEnabled:
            hasEnabledKeys = True
            # `ripe` is the RIPE hash encoded in the address (renamed from
            # `hash`, which shadowed the builtin).
            status, addressVersionNumber, streamNumber, ripe = decodeAddress(addressInKeysFile)
            if addressVersionNumber in (2, 3, 4):
                # Returns a simple 32 bytes of information encoded in 64 hex
                # characters, or null if there was an error.
                privEncryptionKey = hexlify(decodeWalletImportFormat(
                    BMConfigParser().get(addressInKeysFile, 'privencryptionkey')))
                if len(privEncryptionKey) == 64:  # 32 bytes encoded as 64 hex characters
                    myECCryptorObjects[ripe] = highlevelcrypto.makeCryptor(privEncryptionKey)
                    myAddressesByHash[ripe] = addressInKeysFile
                    tag = hashlib.sha512(hashlib.sha512(encodeVarint(
                        addressVersionNumber) + encodeVarint(streamNumber) + ripe).digest()).digest()[32:]
                    myAddressesByTag[tag] = addressInKeysFile
            else:
                logger.error('Error in reloadMyAddressHashes: Can\'t handle address versions other than 2, 3, or 4.\n')

    # Fix the keyfile permissions only after the scan so we know whether any
    # enabled keys were exposed.
    if not keyfileSecure:
        fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys)
def reloadBroadcastSendersForWhichImWatching():
    """Rebuild the broadcast-subscription cryptor tables from the database."""
    broadcastSendersForWhichImWatching.clear()
    MyECSubscriptionCryptorObjects.clear()
    queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1')
    logger.debug('reloading subscriptions...')
    for row in queryreturn:
        address, = row
        # `ripe` renamed from `hash` (shadowed the builtin).
        status, addressVersionNumber, streamNumber, ripe = decodeAddress(address)
        if addressVersionNumber == 2:
            broadcastSendersForWhichImWatching[ripe] = 0
        # Now, for all addresses, even version 2 addresses, we should create
        # Cryptor objects in a dictionary which we will use to attempt to
        # decrypt encrypted broadcast messages.
        if addressVersionNumber <= 3:
            privEncryptionKey = hashlib.sha512(
                encodeVarint(addressVersionNumber) + encodeVarint(streamNumber) + ripe).digest()[:32]
            MyECSubscriptionCryptorObjects[ripe] = highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
        else:
            # v4+: key material is derived from the double hash; broadcasts
            # are looked up by tag rather than by RIPE.
            doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
                addressVersionNumber) + encodeVarint(streamNumber) + ripe).digest()).digest()
            tag = doubleHashOfAddressData[32:]
            privEncryptionKey = doubleHashOfAddressData[:32]
            MyECSubscriptionCryptorObjects[tag] = highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
def fixPotentiallyInvalidUTF8Data(text):
    """
    Return *text* unchanged if it decodes as UTF-8, otherwise a placeholder
    message with the repr of the corrupt data appended.
    """
    try:
        unicode(text, 'utf-8')
        return text
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        output = 'Part of the message is corrupt. The message cannot be displayed the normal way.\n\n' + repr(text)
        return output
# Checks sensitive file permissions for inappropriate umask during keys.dat creation.
# (Or unwise subsequent chmod.)
#
# Returns true iff file appears to have appropriate permissions.
def checkSensitiveFilePermissions(filename):
    if sys.platform == 'win32':
        # TODO: This might deserve extra checks by someone familiar with
        # Windows systems.
        return True
    elif sys.platform[:7] == 'freebsd':
        # FreeBSD file systems are the same as major Linux file systems
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
    else:
        try:
            # Skip known problems for non-Win32 filesystems without POSIX permissions.
            import subprocess
            # Pass an argument list with shell=False (the default) so a
            # crafted filename cannot be used for shell injection;
            # universal_newlines makes the output text on both py2 and py3.
            fstype = subprocess.check_output(
                ['stat', '-f', '-c', '%T', filename],
                stderr=subprocess.STDOUT,
                universal_newlines=True)
            if 'fuseblk' in fstype:
                logger.info('Skipping file permissions check for %s. Filesystem fuseblk detected.',
                            filename)
                return True
        except Exception:
            # Swallow exception here, but we might run into trouble later!
            logger.error('Could not determine filesystem type. %s', filename)
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        # Secure iff no group/other permission bits are set.
        return present_permissions & disallowed_permissions == 0
# Fixes permissions on a sensitive file.
2013-06-27 12:44:49 +02:00
def fixSensitiveFilePermissions ( filename , hasEnabledKeys ) :
if hasEnabledKeys :
2013-07-08 22:21:29 +02:00
logger . warning ( ' Keyfile had insecure permissions, and there were enabled keys. '
' The truly paranoid should stop using them immediately. ' )
2013-06-27 12:44:49 +02:00
else :
2013-07-08 22:21:29 +02:00
logger . warning ( ' Keyfile had insecure permissions, but there were no enabled keys. ' )
2013-06-27 12:44:49 +02:00
try :
present_permissions = os . stat ( filename ) [ 0 ]
disallowed_permissions = stat . S_IRWXG | stat . S_IRWXO
allowed_permissions = ( ( 1 << 32 ) - 1 ) ^ disallowed_permissions
new_permissions = (
allowed_permissions & present_permissions )
os . chmod ( filename , new_permissions )
2013-07-08 22:21:29 +02:00
logger . info ( ' Keyfile permissions automatically fixed. ' )
2013-06-27 12:44:49 +02:00
except Exception , e :
2013-07-08 22:21:29 +02:00
logger . exception ( ' Keyfile permissions could not be fixed. ' )
2013-06-27 12:44:49 +02:00
raise
2013-08-08 21:37:48 +02:00
def isBitSetWithinBitfield(fourByteString, n):
    """Return True if bit *n* of the 4-byte bitfield is set (MSB-0 numbering)."""
    # Uses MSB 0 bit numbering across 4 bytes of data, so bit 0 is the most
    # significant bit of the first byte.
    n = 31 - n
    x, = unpack('>L', fourByteString)
    return x & (1 << n) != 0
def decryptAndCheckPubkeyPayload(data, address):
    """
    Version 4 pubkeys are encrypted. This function is run when we already have the
    address to which we want to try to send a message. The 'data' may come either
    off of the wire or we might have had it already in our inventory when we tried
    to send a msg to this particular address.

    Returns 'successful' if the pubkey decrypted, verified and was stored in
    the pubkeys table, otherwise 'failed'.
    """
    try:
        status, addressVersion, streamNumber, ripe = decodeAddress(address)

        readPosition = 20  # bypass the nonce, time, and object type
        embeddedAddressVersion, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        embeddedStreamNumber, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        # We'll store the address version and stream number (and some more)
        # in the pubkeys table.
        storedData = data[20:readPosition]

        if addressVersion != embeddedAddressVersion:
            logger.info('Pubkey decryption was UNsuccessful due to address version mismatch.')
            return 'failed'
        if streamNumber != embeddedStreamNumber:
            logger.info('Pubkey decryption was UNsuccessful due to stream number mismatch.')
            return 'failed'

        tag = data[readPosition:readPosition + 32]
        readPosition += 32
        # the time through the tag. More data is appended onto signedData
        # below after the decryption.
        signedData = data[8:readPosition]
        encryptedData = data[readPosition:]

        # Let us try to decrypt the pubkey
        toAddress, cryptorObject = state.neededPubkeys[tag]
        if toAddress != address:
            logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
            # the only way I can think that this could happen is if someone encodes their address data two different ways.
            # That sort of address-malleability should have been caught by the UI or API and an error given to the user.
            return 'failed'
        try:
            decryptedData = cryptorObject.decrypt(encryptedData)
        except Exception:
            # Someone must have encrypted some data with a different key
            # but tagged it with a tag for which we are watching.
            logger.info('Pubkey decryption was unsuccessful.')
            return 'failed'

        readPosition = 0
        bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
        readPosition += 4
        # Keys are stored without the 0x04 uncompressed-point prefix; add it back.
        publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedNonceTrialsPerByteLength
        specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedPayloadLengthExtraBytesLength
        storedData += decryptedData[:readPosition]
        signedData += decryptedData[:readPosition]
        signatureLength, signatureLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += signatureLengthLength
        signature = decryptedData[readPosition:readPosition + signatureLength]

        if highlevelcrypto.verify(signedData, signature, hexlify(publicSigningKey)):
            logger.info('ECDSA verify passed (within decryptAndCheckPubkeyPayload)')
        else:
            logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload)')
            return 'failed'

        sha = hashlib.new('sha512')
        sha.update(publicSigningKey + publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        embeddedRipe = ripeHasher.digest()

        if embeddedRipe != ripe:
            # Although this pubkey object had the tag we were looking for and was
            # encrypted with the correct encryption key, it doesn't contain the
            # correct pubkeys. Someone is either being malicious or using buggy software.
            logger.info('Pubkey decryption was UNsuccessful due to RIPE mismatch.')
            return 'failed'

        # Everything checked out. Insert it into the pubkeys table.
        logger.info('within decryptAndCheckPubkeyPayload, addressVersion: %s, streamNumber: %s\n'
                    'ripe %s\n'
                    'publicSigningKey in hex: %s\n'
                    'publicEncryptionKey in hex: %s' % (addressVersion,
                                                        streamNumber,
                                                        hexlify(ripe),
                                                        hexlify(publicSigningKey),
                                                        hexlify(publicEncryptionKey)
                                                        )
                    )

        t = (address, addressVersion, storedData, int(time.time()), 'yes')
        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
        return 'successful'
    except varintDecodeError:
        logger.info('Pubkey decryption was UNsuccessful due to a malformed varint.')
        return 'failed'
    except Exception:
        logger.critical('Pubkey decryption was UNsuccessful because of an unhandled exception! This is definitely a bug! \n%s' % traceback.format_exc())
        return 'failed'
def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object off of the wire
    or after receiving one as ackdata.
    Returns the length of time that we should reserve to process this message
    if we are receiving it off of the wire.
    """
    if len(data) > 2 ** 18:
        logger.info('The payload length of this object is too large (%s bytes). Ignoring it.' % len(data))
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not protocol.isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0

    endOfLifeTime, = unpack('>Q', data[8:16])
    # The TTL may not be larger than 28 days + 3 hours of wiggle room
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800:
        logger.info('This object\'s End of Life time is too far in the future. Ignoring it. Time is %s' % endOfLifeTime)
        return 0
    # The EOL time was more than an hour ago. That's too much.
    if endOfLifeTime - int(time.time()) < -3600:
        logger.info('This object\'s End of Life time was more than an hour ago. Ignoring the object. Time is %s' % endOfLifeTime)
        return 0

    intObjectType, = unpack('>I', data[16:20])
    try:
        # Dispatch on object type; the returned float is the processing-time
        # budget to reserve for this object.
        if intObjectType == 0:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 1:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 2:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == 3:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug("There was a problem with a varint while checking to see whether it was appropriate to share an object with peers. Some details: %s" % e)
    except Exception:
        logger.critical('There was a problem while checking to see whether it was appropriate to share an object with peers. This is definitely a bug! \n%s' % traceback.format_exc())
    return 0
def _checkAndShareUndefinedObjectWithPeers(data):
    """Store and re-advertise an object of a type we don't recognize."""
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this undefined object. Ignoring.')
        return
    objectType, = unpack('>I', data[16:20])
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
def _checkAndShareMsgWithPeers(data):
    """Store a msg object, advertise it to peers, and queue it for processing."""
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndShareGetpubkeyWithPeers(data):
    """Store a getpubkey request, forward it, and queue it for processing."""
    if len(data) < 42:
        logger.info('getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this getpubkey request. Ignoring it.')
        return
    objectType = 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndSharePubkeyWithPeers(data):
    """Store a pubkey object, forward it, and queue it for processing."""
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    if addressVersion >= 4:
        # v4 pubkeys are indexed by tag rather than by address data.
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s' % hexlify(tag))
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = 1
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndShareBroadcastWithPeers(data):
    """Store a broadcast object, forward it, and queue it for processing."""
    if len(data) < 180:
        logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
            return
    if broadcastVersion >= 3:
        tag = data[readPosition:readPosition + 32]
    else:
        tag = ''
    # NOTE(review): if broadcastVersion < 2, streamNumber is never assigned
    # and the Inventory insertion below would raise NameError (caught and
    # logged by the caller). Presumably such ancient broadcasts no longer
    # occur on the network — confirm before relying on this path.
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def openKeysFile():
    """Open keys.dat in the platform's default handler so the user can edit it."""
    if 'linux' in sys.platform:
        import subprocess
        # Argument-list form: no shell involved, so the path needs no quoting.
        subprocess.call(["xdg-open", state.appdata + 'keys.dat'])
    else:
        # os.startfile exists only on Windows; other non-Linux platforms
        # would raise AttributeError here, as in the original.
        os.startfile(state.appdata + 'keys.dat')
from debug import logger