2017-01-11 14:27:19 +01:00
import base64
2017-01-11 17:00:00 +01:00
from binascii import hexlify
2017-01-11 14:27:19 +01:00
import hashlib
import random
import socket
import ssl
from struct import pack , unpack , Struct
import sys
import time
2017-01-11 17:46:33 +01:00
import traceback
2017-01-11 14:27:19 +01:00
2017-01-11 17:00:00 +01:00
from addresses import calculateInventoryHash , encodeVarint , decodeVarint , decodeAddress , varintDecodeError
2017-02-22 09:34:54 +01:00
from bmconfigparser import BMConfigParser
2017-01-11 17:00:00 +01:00
from debug import logger
2017-02-08 20:37:42 +01:00
import defaults
2017-01-11 17:00:00 +01:00
from helper_sql import sqlExecute
import highlevelcrypto
from inventory import Inventory
2017-02-08 14:19:02 +01:00
from queues import objectProcessorQueue
2017-01-11 17:00:00 +01:00
import state
2017-01-11 14:27:19 +01:00
from version import softwareVersion
# Service flags (advertised in the services bitfield of the version message)
NODE_NETWORK = 1
NODE_SSL = 2
NODE_DANDELION = 8

# Bitfield flags (per-address feature bits, see the wiki)
BITFIELD_DOESACK = 1

# Error types (severity codes carried in "error" messages)
STATUS_WARNING = 0
STATUS_ERROR = 1
STATUS_FATAL = 2

# Object types (value of the objectType field of an object message)
OBJECT_GETPUBKEY = 0
OBJECT_PUBKEY = 1
OBJECT_MSG = 2
OBJECT_BROADCAST = 3
OBJECT_I2P = 0x493250
OBJECT_ADDR = 0x61646472

# Process-wide random nonce sent in version messages; if a peer echoes it
# back we know we have connected to ourselves.
# NOTE(review): randrange's upper bound is exclusive, so 2**64-1 itself can
# never be chosen -- harmless, but probably unintended.
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
    '>Q', random.randrange(1, 18446744073709551615))

# Compiled struct for packing/unpacking headers
# New code should use CreatePacket instead of Header.pack
Header = Struct('!L12sL4s')

# Pre-compiled layout for a fixed-size leading portion of the version
# message payload -- presumably version/services/time/addr fields; confirm
# against the callers before relying on the field breakdown.
VersionPacket = Struct('>LqQ20s4s36sH')
2017-03-11 11:12:08 +01:00
2017-01-11 14:27:19 +01:00
# Bitfield
def getBitfield(address):
    """Assemble the 4-byte bitfield of features supported by me (see the wiki)
    for the given sending address."""
    flags = 0
    # Acknowledgements are sent unless this address opts out in the config.
    if not BMConfigParser().safeGetBoolean(address, 'dontsendack'):
        flags |= BITFIELD_DOESACK
    return pack('>I', flags)
2015-12-15 20:30:32 +01:00
def checkBitfield(bitfieldBinary, flags):
    """Return True when every bit in *flags* is set in the packed 4-byte
    bitfield *bitfieldBinary*."""
    value = unpack('>I', bitfieldBinary)[0]
    return value & flags == flags
def isBitSetWithinBitfield(fourByteString, n):
    """Check bit *n* of a 4-byte string, using MSB 0 bit numbering
    (bit 0 is the most significant bit of the first byte)."""
    shift = 31 - n
    value, = unpack('>L', fourByteString)
    return bool(value & (1 << shift))
2017-05-25 23:04:33 +02:00
# ip addresses
def encodeHost(host):
    """Encode a textual host into the 16-byte form used in addr payloads.

    .onion hosts become the OnionCat prefix (fd87:d87e:eb43::/48) followed
    by the base32-decoded onion address; a host without a colon is treated
    as dotted-quad IPv4 and mapped into IPv6 (::ffff:a.b.c.d); anything
    else is assumed to be a literal IPv6 address.
    """
    if host.find('.onion') > -1:
        # True enables base32 casefolding so mixed-case onion names decode
        return '\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode(host.split(".")[0], True)
    elif host.find(':') == -1:
        return '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + \
            socket.inet_aton(host)
    else:
        return socket.inet_pton(socket.AF_INET6, host)
2017-01-12 19:18:56 +01:00
def networkType(host):
    """Classify a textual host as 'onion', 'IPv4' or 'IPv6'."""
    if '.onion' in host:
        return 'onion'
    # No colon means it cannot be an IPv6 literal
    return 'IPv4' if ':' not in host else 'IPv6'
2017-05-27 19:09:21 +02:00
def checkIPAddress(host, private=False):
    """Dispatch a packed 16-byte address to the appropriate family checker.

    Returns the printable address string when acceptable (or, with
    private=True, when it lies in a private range -- see the per-family
    checkers), otherwise False.
    """
    if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
        # IPv4-mapped IPv6: the real IPv4 address is the last 4 bytes
        hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:])
        return checkIPv4Address(host[12:], hostStandardFormat, private)
    elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43':
        # Onion, based on BMD/bitcoind
        hostStandardFormat = base64.b32encode(host[6:]).lower() + ".onion"
        # Onion addresses are never "private" in the IP-range sense
        if private:
            return False
        return hostStandardFormat
    else:
        try:
            hostStandardFormat = socket.inet_ntop(socket.AF_INET6, host)
        except ValueError:
            return False
        if hostStandardFormat == "":
            # This can happen on Windows systems which are not 64-bit compatible
            # so let us drop the IPv6 address.
            return False
        return checkIPv6Address(host, hostStandardFormat, private)
2017-05-25 23:04:33 +02:00
2017-05-27 19:09:21 +02:00
def checkIPv4Address(host, hostStandardFormat, private=False):
    """Validate a packed 4-byte IPv4 address.

    With private=False: reject loopback and private ranges, return the
    printable form for everything else.  With private=True the sense
    inverts: only loopback/private-range addresses are returned.
    """
    if host[0] == '\x7F':  # 127/8
        if not private:
            logger.debug('Ignoring IP address in loopback range: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    if host[0] == '\x0A':  # 10/8
        if not private:
            logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    if host[0:2] == '\xC0\xA8':  # 192.168/16
        if not private:
            logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    # Byte-string comparison covers the whole 172.16.0.0 - 172.31.255.255 span
    if host[0:2] >= '\xAC\x10' and host[0:2] < '\xAC\x20':  # 172.16/12
        if not private:
            logger.debug('Ignoring IP address in private range: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    # Public address: acceptable unless we were explicitly asked for private
    return False if private else hostStandardFormat
2017-05-25 23:04:33 +02:00
2017-05-27 19:09:21 +02:00
def checkIPv6Address(host, hostStandardFormat, private=False):
    """Validate a packed 16-byte IPv6 address (mirror of checkIPv4Address)."""
    if host == ('\x00' * 15) + '\x01':  # ::1 loopback
        if not private:
            logger.debug('Ignoring loopback address: ' + hostStandardFormat)
        # NOTE(review): unlike the IPv4 checker, loopback is rejected even
        # when private=True -- confirm whether this asymmetry is intended.
        return False
    if host[0] == '\xFE' and (ord(host[1]) & 0xc0) == 0x80:  # fe80::/10 link-local
        if not private:
            logger.debug('Ignoring local address: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    if (ord(host[0]) & 0xfe) == 0xfc:  # fc00::/7 unique local
        if not private:
            logger.debug('Ignoring unique local address: ' + hostStandardFormat)
        return hostStandardFormat if private else False
    # Public address: acceptable unless we were explicitly asked for private
    return False if private else hostStandardFormat
2017-05-25 23:04:33 +02:00
2017-01-11 14:27:19 +01:00
# checks
def haveSSL(server=False):
    """Report whether SSL is usable in the requested role.

    The client role always works.  Python < 2.7.9's ssl library does not
    support an ECDSA server due to missing initialisation of the available
    curves, so the server role additionally requires 2.7.9+.
    """
    if not server:  # idiomatic form of the old `server == False` test
        return True
    return sys.version_info >= (2, 7, 9)
def checkSocksIP(host):
    """Return True if *host* is the (cached) IP of the configured SOCKS proxy.

    The resolved proxy address is cached in state.socksIP on first use.
    """
    try:
        # `not state.socksIP` already covers None, so a single test suffices
        if not state.socksIP:
            state.socksIP = socket.gethostbyname(
                BMConfigParser().get("bitmessagesettings", "sockshostname"))
    # uninitialised -- a missing module attribute raises AttributeError,
    # not the NameError the old code caught; handle both to be safe
    except (NameError, AttributeError):
        state.socksIP = socket.gethostbyname(
            BMConfigParser().get("bitmessagesettings", "sockshostname"))
    # resolving failure: fall back to comparing the raw hostname
    except socket.gaierror:
        state.socksIP = BMConfigParser().get("bitmessagesettings", "sockshostname")
    return state.socksIP == host
def isProofOfWorkSufficient(data,
                            nonceTrialsPerByte=0,
                            payloadLengthExtraBytes=0):
    """Check an object's proof of work against the required difficulty.

    data -- the complete object payload, starting with the 8-byte nonce.
    Difficulty arguments below the network defaults (including the 0
    defaults) are raised to those defaults.  Returns True when the
    double-SHA512 trial value is small enough for the object's length and
    remaining TTL.
    """
    if nonceTrialsPerByte < defaults.networkDefaultProofOfWorkNonceTrialsPerByte:
        nonceTrialsPerByte = defaults.networkDefaultProofOfWorkNonceTrialsPerByte
    if payloadLengthExtraBytes < defaults.networkDefaultPayloadLengthExtraBytes:
        payloadLengthExtraBytes = defaults.networkDefaultPayloadLengthExtraBytes
    endOfLifeTime, = unpack('>Q', data[8:16])
    # Objects pay for at least 300 seconds of life even if nearly expired
    TTL = endOfLifeTime - int(time.time())
    if TTL < 300:
        TTL = 300
    # Trial value: first 8 bytes of sha512(sha512(nonce + sha512(rest)))
    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
        :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
    # NOTE: both '/' operations rely on Python 2 integer (floor) division
    return POW <= 2 ** 64 / (nonceTrialsPerByte * (len(data) + payloadLengthExtraBytes + ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16))))
2017-01-11 14:27:19 +01:00
# Packet creation
def CreatePacket(command, payload=''):
    """Build a complete wire packet: magic, command, payload length,
    checksum header followed by the payload itself."""
    checksum = hashlib.sha512(payload).digest()[:4]
    length = len(payload)
    buf = bytearray(Header.size + length)
    Header.pack_into(buf, 0, 0xE9BEB4D9, command, length, checksum)
    buf[Header.size:] = payload
    return bytes(buf)
2017-07-10 07:10:05 +02:00
def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=False, nodeid=None):
    """Construct a complete 'version' packet for the given remote peer.

    server -- True when we are the listening side of this connection.
    nodeid -- optional node id; only its first 8 bytes are used.  When
    absent, a process-wide random nonce is sent instead so peers can detect
    connections to themselves.
    """
    payload = ''
    payload += pack('>L', 3)  # protocol version.
    # bitflags of the services I offer.
    payload += pack('>q',
                    NODE_NETWORK |
                    (NODE_SSL if haveSSL(server) else 0) |
                    (NODE_DANDELION if state.dandelion else 0)
                    )
    payload += pack('>q', int(time.time()))
    payload += pack(
        '>q', 1)  # boolservices of remote connection; ignored by the remote host.
    if checkSocksIP(remoteHost) and server:  # prevent leaking of tor outbound IP
        payload += encodeHost('127.0.0.1')
        payload += pack('>H', 8444)
    else:
        payload += encodeHost(remoteHost)
        payload += pack('>H', remotePort)  # remote IPv6 and port
    # bitflags of the services I offer.
    payload += pack('>q',
                    NODE_NETWORK |
                    (NODE_SSL if haveSSL(server) else 0) |
                    (NODE_DANDELION if state.dandelion else 0)
                    )
    payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack(
        '>L', 2130706433)  # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
    # we have a separate extPort and
    # incoming over clearnet or
    # outgoing through clearnet
    if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and state.extPort \
        and ((server and not checkSocksIP(remoteHost)) or \
            (BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none" and not server)):
        payload += pack('>H', state.extPort)
    elif checkSocksIP(remoteHost) and server:  # incoming connection over Tor
        payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
    else:  # no extPort and not incoming over Tor
        payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'port'))
    random.seed()
    if nodeid is not None:
        payload += nodeid[0:8]
    else:
        payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
    userAgent = '/PyBitmessage:' + softwareVersion + '/'
    payload += encodeVarint(len(userAgent))
    payload += userAgent
    # Streams
    payload += encodeVarint(len(participatingStreams))
    count = 0
    for stream in sorted(participatingStreams):
        payload += encodeVarint(stream)
        count += 1
        # protocol limit, see specification
        if count >= 160000:
            break
    return CreatePacket('version', payload)
def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''):
    """Build an 'error' packet: severity, ban time, the inventory vector it
    refers to (length-prefixed) and a length-prefixed error text."""
    parts = [
        encodeVarint(fatal),
        encodeVarint(banTime),
        encodeVarint(len(inventoryVector)),
        inventoryVector,
        encodeVarint(len(errorText)),
        errorText,
    ]
    return CreatePacket('error', ''.join(parts))
# Packet decoding
def decryptAndCheckPubkeyPayload(data, address):
    """
    Version 4 pubkeys are encrypted. This function is run when we already have the
    address to which we want to try to send a message. The 'data' may come either
    off of the wire or we might have had it already in our inventory when we tried
    to send a msg to this particular address.

    Returns 'successful' (and stores the pubkey in the pubkeys table) when
    every check passes, otherwise 'failed'.
    """
    try:
        status, addressVersion, streamNumber, ripe = decodeAddress(address)
        readPosition = 20  # bypass the nonce, time, and object type
        embeddedAddressVersion, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        embeddedStreamNumber, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        storedData = data[20:readPosition]  # We'll store the address version and stream number (and some more) in the pubkeys table.
        if addressVersion != embeddedAddressVersion:
            logger.info('Pubkey decryption was UNsuccessful due to address version mismatch.')
            return 'failed'
        if streamNumber != embeddedStreamNumber:
            logger.info('Pubkey decryption was UNsuccessful due to stream number mismatch.')
            return 'failed'
        tag = data[readPosition:readPosition + 32]
        readPosition += 32
        signedData = data[8:readPosition]  # the time through the tag. More data is appended onto signedData below after the decryption.
        encryptedData = data[readPosition:]
        # Let us try to decrypt the pubkey
        toAddress, cryptorObject = state.neededPubkeys[tag]
        if toAddress != address:
            logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s', toAddress, address)
            # the only way I can think that this could happen is if someone encodes their address data two different ways.
            # That sort of address-malleability should have been caught by the UI or API and an error given to the user.
            return 'failed'
        try:
            decryptedData = cryptorObject.decrypt(encryptedData)
        except:
            # Someone must have encrypted some data with a different key
            # but tagged it with a tag for which we are watching.
            logger.info('Pubkey decryption was unsuccessful.')
            return 'failed'
        # Parse the decrypted pubkey body
        readPosition = 0
        bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
        readPosition += 4
        # 0x04 prefix marks an uncompressed public key point
        publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedNonceTrialsPerByteLength
        specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedPayloadLengthExtraBytesLength
        storedData += decryptedData[:readPosition]
        signedData += decryptedData[:readPosition]
        signatureLength, signatureLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += signatureLengthLength
        signature = decryptedData[readPosition:readPosition + signatureLength]
        if highlevelcrypto.verify(signedData, signature, hexlify(publicSigningKey)):
            logger.info('ECDSA verify passed (within decryptAndCheckPubkeyPayload)')
        else:
            logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload)')
            return 'failed'
        # The address RIPE must match ripemd160(sha512(pubkeys))
        sha = hashlib.new('sha512')
        sha.update(publicSigningKey + publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        embeddedRipe = ripeHasher.digest()
        if embeddedRipe != ripe:
            # Although this pubkey object had the tag we were looking for and was
            # encrypted with the correct encryption key, it doesn't contain the
            # correct pubkeys. Someone is either being malicious or using buggy software.
            logger.info('Pubkey decryption was UNsuccessful due to RIPE mismatch.')
            return 'failed'
        # Everything checked out. Insert it into the pubkeys table.
        logger.info('within decryptAndCheckPubkeyPayload, addressVersion: %s, streamNumber: %s \n\
ripe %s\n\
publicSigningKey in hex: %s\n\
publicEncryptionKey in hex: %s', addressVersion,
                    streamNumber,
                    hexlify(ripe),
                    hexlify(publicSigningKey),
                    hexlify(publicEncryptionKey)
                    )
        t = (address, addressVersion, storedData, int(time.time()), 'yes')
        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
        return 'successful'
    except varintDecodeError as e:
        logger.info('Pubkey decryption was UNsuccessful due to a malformed varint.')
        return 'failed'
    except Exception as e:
        logger.critical('Pubkey decryption was UNsuccessful because of an unhandled exception! This is definitely a bug! \n%s', traceback.format_exc())
        return 'failed'
def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object off of the wire
    or after receiving one as ackdata.
    Returns the length of time that we should reserve to process this message
    if we are receiving it off of the wire.
    """
    if len(data) > 2 ** 18:
        logger.info('The payload length of this object is too large (%s bytes). Ignoring it.', len(data))
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0
    endOfLifeTime, = unpack('>Q', data[8:16])
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800:  # The TTL may not be larger than 28 days + 3 hours of wiggle room
        logger.info('This object\'s End of Life time is too far in the future. Ignoring it. Time is %s', endOfLifeTime)
        return 0
    if endOfLifeTime - int(time.time()) < - 3600:  # The EOL time was more than an hour ago. That's too much.
        logger.info('This object\'s End of Life time was more than an hour ago. Ignoring the object. Time is %s', endOfLifeTime)
        return 0
    intObjectType, = unpack('>I', data[16:20])
    try:
        # Dispatch on the named object-type constants defined at the top of
        # this module instead of magic numbers.
        if intObjectType == OBJECT_GETPUBKEY:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == OBJECT_PUBKEY:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == OBJECT_MSG:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == OBJECT_BROADCAST:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug("There was a problem with a varint while checking to see whether it was appropriate to share an object with peers. Some details: %s", e)
    except Exception:
        # `as e` was unused here; the full traceback is logged instead
        logger.critical('There was a problem while checking to see whether it was appropriate to share an object with peers. This is definitely a bug! \n%s', traceback.format_exc())
    return 0
def _checkAndShareUndefinedObjectWithPeers(data):
    """Validate an object of unknown type and advertise it to peers.

    Unknown types are forwarded (not processed) so newer protocol objects
    still propagate through the network.
    """
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    # idiomatic `not in` instead of `not x in y`
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this undefined object. Ignoring.')
        return
    objectType, = unpack('>I', data[16:20])
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
def _checkAndShareMsgWithPeers(data):
    """Validate a msg object, advertise it to peers and queue it for our own
    processing."""
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    # idiomatic `not in` instead of `not x in y`
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = OBJECT_MSG  # named constant instead of magic number 2
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndShareGetpubkeyWithPeers(data):
    """Validate a getpubkey request, advertise it to peers and queue it for
    our own processing."""
    if len(data) < 42:
        logger.info('getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    if len(data) > 200:
        logger.info('getpubkey is abnormally long. Sanity check failed. Ignoring object.')
        # BUGFIX: the missing `return` here previously let abnormally long
        # objects fall through and be processed despite the log message.
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    # idiomatic `not in` instead of `not x in y`
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this getpubkey request. Ignoring it.')
        return
    objectType = OBJECT_GETPUBKEY  # named constant instead of magic number 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndSharePubkeyWithPeers(data):
    """Validate a pubkey object, advertise it to peers and queue it for our
    own processing."""
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    # idiomatic `not in` instead of `not x in y`
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    if addressVersion >= 4:
        # v4 pubkeys carry a 32-byte tag used to index them
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s', hexlify(tag))
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = OBJECT_PUBKEY  # named constant instead of magic number 1
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
def _checkAndShareBroadcastWithPeers(data):
    """Validate a broadcast object, advertise it to peers and queue it for
    our own processing."""
    if len(data) < 180:
        logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion < 2:
        # BUGFIX: version < 2 broadcasts carry no stream number, and the old
        # code then hit a NameError on the unbound streamNumber/tag below.
        # Ignore such (long-obsolete) broadcasts explicitly instead.
        logger.debug('Obsolete broadcast version %s. Ignoring it.', broadcastVersion)
        return
    streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
    readPosition += streamNumberLength
    # idiomatic `not in` instead of `not x in y`
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.', streamNumber)
        return
    if broadcastVersion >= 3:
        # v3 broadcasts carry a 32-byte tag
        tag = data[readPosition:readPosition + 32]
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = OBJECT_BROADCAST  # named constant instead of magic number 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))
2017-01-11 17:00:00 +01:00
# If you want to command all of the sendDataThreads to do something, like
# shutdown or send some data, this function puts your data into the queues
# for each of the sendDataThreads. The sendDataThreads are responsible for
# putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
    """Deliver *data* to every registered sendDataThread queue."""
    for sendDataQueue in state.sendDataQueues:
        sendDataQueue.put(data)
2017-01-11 20:47:27 +01:00
# sslProtocolVersion -- pick the most flexible protocol constant this
# Python's ssl module offers.
if sys.version_info >= (2, 7, 13):
    # this means TLSv1 or higher
    # in the future change to
    # ssl.PROTOCOL_TLS1.2
    sslProtocolVersion = ssl.PROTOCOL_TLS
elif sys.version_info >= (2, 7, 9):
    # this means any SSL/TLS. SSLv2 and 3 are excluded with an option after context is created
    sslProtocolVersion = ssl.PROTOCOL_SSLv23
else:
    # this means TLSv1, there is no way to set "TLSv1 or higher" or
    # "TLSv1.2" in < 2.7.9
    sslProtocolVersion = ssl.PROTOCOL_TLSv1

# ciphers
# NOTE(review): SECLEVEL=0 is appended for OpenSSL >= 1.1.0 -- presumably to
# re-enable the anonymous AECDH suite at the default security level -- while
# LibreSSL (which mimics a high version number but does not understand
# SECLEVEL) keeps the plain cipher string. Confirm against OpenSSL docs.
if ssl.OPENSSL_VERSION_NUMBER >= 0x10100000 and not ssl.OPENSSL_VERSION.startswith("LibreSSL"):
    sslProtocolCiphers = "AECDH-AES256-SHA@SECLEVEL=0"
else:
    sslProtocolCiphers = "AECDH-AES256-SHA"