from __future__ import division

# Libraries.
import os
import sys
import stat
import time
import threading
import traceback
import hashlib
import subprocess
from struct import unpack
from binascii import hexlify
from pyelliptic import arithmetic

# Project imports.
import protocol
import state
import highlevelcrypto
from bmconfigparser import BMConfigParser
from debug import logger
from addresses import (
    decodeAddress, encodeVarint, decodeVarint, varintDecodeError,
    calculateInventoryHash
)
from helper_sql import sqlQuery, sqlExecute
from inventory import Inventory
from queues import objectProcessorQueue


verbose = 1
# This is obsolete with the change to protocol v3
# but the singleCleaner thread still hasn't been updated
# so we need this a little longer.
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000  # Equals 2.5 days
# Equals 4 weeks. You could make this longer if you want
# but making it shorter would not be advisable because
# there is a very small possibility that it could keep you
# from obtaining a needed pubkey for a period of time.
lengthOfTimeToHoldOnToAllPubkeys = 2419200
maximumAgeOfNodesThatIAdvertiseToOthers = 10800  # Equals three hours
# If you set this to True while on the normal network,
# you won't be able to send or sometimes receive messages.
useVeryEasyProofOfWorkForTesting = False

myECCryptorObjects = {}
MyECSubscriptionCryptorObjects = {}
# The key in this dictionary is the RIPE hash which is encoded
# in an address and the value is the address itself.
myAddressesByHash = {}
# The key in this dictionary is the tag generated from the address.
myAddressesByTag = {}
broadcastSendersForWhichImWatching = {}
printLock = threading.Lock()
statusIconColor = 'red'
# List of hosts to which we are connected. Used to guarantee
# that the outgoingSynSender threads won't connect to the same
# remote node twice.
connectedHostsList = {}
thisapp = None  # singleton lock instance
# This is a list of nodes to which we have already attempted a connection.
alreadyAttemptedConnectionsList = {}
alreadyAttemptedConnectionsListLock = threading.Lock()
# used to clear out the alreadyAttemptedConnectionsList periodically
# so that we will retry connecting to hosts to which we have already
# tried to connect.
alreadyAttemptedConnectionsListResetTime = int(time.time())
# A list of the amounts of time it took to successfully decrypt msg messages
successfullyDecryptMessageTimings = []
ackdataForWhichImWatching = {}
# used by API command clientStatus
clientHasReceivedIncomingConnections = False
numberOfMessagesProcessed = 0
numberOfBroadcastsProcessed = 0
numberOfPubkeysProcessed = 0
# If True, the singleCleaner will write it to disk eventually.
needToWriteKnownNodesToDisk = False
maximumLengthOfTimeToBotherResendingMessages = 0
timeOffsetWrongCount = 0


def isAddressInMyAddressBook(address):
    queryreturn = sqlQuery(
        '''select address from addressbook where address=?''',
        address)
    return queryreturn != []


# At this point we should really just have a isAddressInMy(book, address)...
def isAddressInMySubscriptionsList(address):
    queryreturn = sqlQuery(
        '''select * from subscriptions where address=?''',
        str(address))
    return queryreturn != []


def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
    if isAddressInMyAddressBook(address):
        return True

    queryreturn = sqlQuery(
        '''SELECT address FROM whitelist where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True

    queryreturn = sqlQuery(
        '''select address from subscriptions where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True
    return False


def decodeWalletImportFormat(WIFstring):
    fullString = arithmetic.changebase(WIFstring, 58, 256)
    privkey = fullString[:-4]
    if fullString[-4:] != \
            hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        logger.critical(
            'Major problem! When trying to decode one of your'
            ' private keys, the checksum failed. Here are the first'
            ' 6 characters of the PRIVATE key: %s',
            str(WIFstring)[:6]
        )
        os._exit(0)
        # return ""
    elif privkey[0] == '\x80':  # checksum passed
        return privkey[1:]

    logger.critical(
        'Major problem! When trying to decode one of your private keys,'
        ' the checksum passed but the key doesn\'t begin with hex 80.'
        ' Here is the PRIVATE key: %s', WIFstring
    )
    os._exit(0)
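

# A minimal sketch of the inverse operation, for reference only: the WIF
# layout the decoder above expects is base58(0x80 + privkey + checksum),
# where the checksum is the first 4 bytes of a double SHA-256 digest.
# This is a hypothetical helper; it is not called anywhere in this module.
def encodeWalletImportFormatSketch(privkey):
    fullString = '\x80' + privkey
    checksum = hashlib.sha256(
        hashlib.sha256(fullString).digest()).digest()[:4]
    return arithmetic.changebase(fullString + checksum, 256, 58)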


def reloadMyAddressHashes():
    logger.debug('reloading keys from keys.dat file')
    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    # myPrivateKeys.clear()

    keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat')
    hasEnabledKeys = False
    for addressInKeysFile in BMConfigParser().addresses():
        isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
        if isEnabled:
            hasEnabledKeys = True
            # The first tuple member is the decode status, unused here.
            _, addressVersionNumber, streamNumber, hash = \
                decodeAddress(addressInKeysFile)
            if addressVersionNumber in (2, 3, 4):
                # Returns a simple 32 bytes of information encoded
                # in 64 Hex characters, or null if there was an error.
                privEncryptionKey = hexlify(decodeWalletImportFormat(
                    BMConfigParser().get(addressInKeysFile, 'privencryptionkey'))
                )

                # It is 32 bytes encoded as 64 hex characters
                if len(privEncryptionKey) == 64:
                    myECCryptorObjects[hash] = \
                        highlevelcrypto.makeCryptor(privEncryptionKey)
                    myAddressesByHash[hash] = addressInKeysFile
                    tag = hashlib.sha512(hashlib.sha512(
                        encodeVarint(addressVersionNumber) +
                        encodeVarint(streamNumber) + hash).digest()
                    ).digest()[32:]
                    myAddressesByTag[tag] = addressInKeysFile
            else:
                logger.error(
                    'Error in reloadMyAddressHashes: Can\'t handle'
                    ' address versions other than 2, 3, or 4.\n'
                )

    if not keyfileSecure:
        fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys)


def reloadBroadcastSendersForWhichImWatching():
    broadcastSendersForWhichImWatching.clear()
    MyECSubscriptionCryptorObjects.clear()
    queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1')
    logger.debug('reloading subscriptions...')
    for row in queryreturn:
        address, = row
        # The first tuple member is the decode status, unused here.
        _, addressVersionNumber, streamNumber, hash = decodeAddress(address)
        if addressVersionNumber == 2:
            broadcastSendersForWhichImWatching[hash] = 0
        # Now, for all addresses, even version 2 addresses,
        # we should create Cryptor objects in a dictionary which we will
        # use to attempt to decrypt encrypted broadcast messages.

        if addressVersionNumber <= 3:
            privEncryptionKey = hashlib.sha512(
                encodeVarint(addressVersionNumber) +
                encodeVarint(streamNumber) + hash
            ).digest()[:32]
            MyECSubscriptionCryptorObjects[hash] = \
                highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
        else:
            doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
                encodeVarint(addressVersionNumber) +
                encodeVarint(streamNumber) + hash
            ).digest()).digest()
            tag = doubleHashOfAddressData[32:]
            privEncryptionKey = doubleHashOfAddressData[:32]
            MyECSubscriptionCryptorObjects[tag] = \
                highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
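

# A minimal sketch (hypothetical helper, not used elsewhere in this module)
# of the v4 derivation used above: the tag and the broadcast decryption key
# are the two halves of a single double SHA-512 over the encoded address
# data.
def deriveV4AddressTagAndKeySketch(addressVersionNumber, streamNumber, ripe):
    doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
        encodeVarint(addressVersionNumber) +
        encodeVarint(streamNumber) + ripe
    ).digest()).digest()
    # Returns (tag, privEncryptionKey).
    return doubleHashOfAddressData[32:], doubleHashOfAddressData[:32]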


def fixPotentiallyInvalidUTF8Data(text):
    try:
        unicode(text, 'utf-8')
        return text
    except:
        return 'Part of the message is corrupt. The message cannot be' \
            ' displayed the normal way.\n\n' + repr(text)
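

# Worked example: fixPotentiallyInvalidUTF8Data('\xff') fails to decode as
# UTF-8, so the corruption notice with repr('\xff') appended is returned;
# valid UTF-8 input is returned unchanged.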


# Checks sensitive file permissions for inappropriate umask
# during keys.dat creation. (Or unwise subsequent chmod.)
#
# Returns true iff file appears to have appropriate permissions.
def checkSensitiveFilePermissions(filename):
    if sys.platform == 'win32':
        # TODO: This might deserve extra checks by someone familiar with
        # Windows systems.
        return True
    elif sys.platform[:7] == 'freebsd':
        # FreeBSD file systems are the same as major Linux file systems
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
    else:
        try:
            # Skip known problems for non-Win32 filesystems
            # without POSIX permissions.
            fstype = subprocess.check_output(
                'stat -f -c "%%T" %s' % (filename),
                shell=True,
                stderr=subprocess.STDOUT
            )
            if 'fuseblk' in fstype:
                logger.info(
                    'Skipping file permissions check for %s.'
                    ' Filesystem fuseblk detected.', filename)
                return True
        except:
            # Swallow exception here, but we might run into trouble later!
            logger.error('Could not determine filesystem type. %s', filename)
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
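

# Worked example: the disallowed mask stat.S_IRWXG | stat.S_IRWXO is
# octal 077, so a keys.dat with mode 0600 passes (0600 & 077 == 0) while
# mode 0640 fails because the group-read bit overlaps the mask.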


# Fixes permissions on a sensitive file.
def fixSensitiveFilePermissions(filename, hasEnabledKeys):
    if hasEnabledKeys:
        logger.warning(
            'Keyfile had insecure permissions, and there were enabled'
            ' keys. The truly paranoid should stop using them immediately.')
    else:
        logger.warning(
            'Keyfile had insecure permissions, but there were no enabled keys.'
        )
    try:
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        allowed_permissions = ((1 << 32) - 1) ^ disallowed_permissions
        new_permissions = (
            allowed_permissions & present_permissions)
        os.chmod(filename, new_permissions)

        logger.info('Keyfile permissions automatically fixed.')

    except Exception:
        logger.exception('Keyfile permissions could not be fixed.')
        raise
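

# Worked example of the masking above: a file found with mode 0640 is
# rewritten as 0640 & ~077 == 0600, preserving the owner bits while
# clearing all group and other bits.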


def isBitSetWithinBitfield(fourByteString, n):
    # Uses MSB 0 bit numbering across 4 bytes of data
    n = 31 - n
    x, = unpack('>L', fourByteString)
    return x & 2**n != 0
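

# Worked example: in '\x00\x00\x00\x01' only the least significant bit is
# set, which is bit 31 under MSB 0 numbering. So
# isBitSetWithinBitfield('\x00\x00\x00\x01', 31) is True and
# isBitSetWithinBitfield('\x00\x00\x00\x01', 0) is False.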


def decryptAndCheckPubkeyPayload(data, address):
    """
    Version 4 pubkeys are encrypted. This function is run when we
    already have the address to which we want to try to send a message.
    The 'data' may come either off of the wire or we might have had it
    already in our inventory when we tried to send a msg to this
    particular address.
    """
    try:
        # The first tuple member is the decode status, unused here.
        _, addressVersion, streamNumber, ripe = decodeAddress(address)

        readPosition = 20  # bypass the nonce, time, and object type
        embeddedAddressVersion, varintLength = \
            decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        embeddedStreamNumber, varintLength = \
            decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        # We'll store the address version and stream number
        # (and some more) in the pubkeys table.
        storedData = data[20:readPosition]

        if addressVersion != embeddedAddressVersion:
            logger.info(
                'Pubkey decryption was UNsuccessful'
                ' due to address version mismatch.')
            return 'failed'
        if streamNumber != embeddedStreamNumber:
            logger.info(
                'Pubkey decryption was UNsuccessful'
                ' due to stream number mismatch.')
            return 'failed'

        tag = data[readPosition:readPosition + 32]
        readPosition += 32
        # signedData covers the time field through the tag. More data is
        # appended onto signedData below after the decryption.
        signedData = data[8:readPosition]
        encryptedData = data[readPosition:]

        # Let us try to decrypt the pubkey
        toAddress, cryptorObject = state.neededPubkeys[tag]
        if toAddress != address:
            logger.critical(
                'decryptAndCheckPubkeyPayload failed due to toAddress'
                ' mismatch. This is very peculiar.'
                ' toAddress: %s, address %s',
                toAddress, address
            )
            # The only way I can think that this could happen
            # is if someone encodes their address data two different ways.
            # That sort of address-malleability should have been caught
            # by the UI or API and an error given to the user.
            return 'failed'
        try:
            decryptedData = cryptorObject.decrypt(encryptedData)
        except:
            # Someone must have encrypted some data with a different key
            # but tagged it with a tag for which we are watching.
            logger.info('Pubkey decryption was unsuccessful.')
            return 'failed'

        readPosition = 0
        # bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
        readPosition += 4
        publicSigningKey = \
            '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        publicEncryptionKey = \
            '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = \
            decodeVarint(decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedNonceTrialsPerByteLength
        specifiedPayloadLengthExtraBytes, \
            specifiedPayloadLengthExtraBytesLength = \
            decodeVarint(decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedPayloadLengthExtraBytesLength
        storedData += decryptedData[:readPosition]
        signedData += decryptedData[:readPosition]
        signatureLength, signatureLengthLength = \
            decodeVarint(decryptedData[readPosition:readPosition + 10])
        readPosition += signatureLengthLength
        signature = decryptedData[readPosition:readPosition + signatureLength]

        if not highlevelcrypto.verify(
                signedData, signature, hexlify(publicSigningKey)):
            logger.info(
                'ECDSA verify failed (within decryptAndCheckPubkeyPayload)')
            return 'failed'

        logger.info(
            'ECDSA verify passed (within decryptAndCheckPubkeyPayload)')

        sha = hashlib.new('sha512')
        sha.update(publicSigningKey + publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        embeddedRipe = ripeHasher.digest()

        if embeddedRipe != ripe:
            # Although this pubkey object had the tag we were looking for
            # and was encrypted with the correct encryption key,
            # it doesn't contain the correct pubkeys. Someone is
            # either being malicious or using buggy software.
            logger.info(
                'Pubkey decryption was UNsuccessful due to RIPE mismatch.')
            return 'failed'

        # Everything checked out. Insert it into the pubkeys table.

        logger.info(
            'within decryptAndCheckPubkeyPayload, '
            'addressVersion: %s, streamNumber: %s\nripe %s\n'
            'publicSigningKey in hex: %s\npublicEncryptionKey in hex: %s',
            addressVersion, streamNumber, hexlify(ripe),
            hexlify(publicSigningKey), hexlify(publicEncryptionKey)
        )

        t = (address, addressVersion, storedData, int(time.time()), 'yes')
        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
        return 'successful'
    except varintDecodeError:
        logger.info(
            'Pubkey decryption was UNsuccessful due to a malformed varint.')
        return 'failed'
    except Exception:
        logger.critical(
            'Pubkey decryption was UNsuccessful because of'
            ' an unhandled exception! This is definitely a bug! \n%s' %
            traceback.format_exc()
        )
        return 'failed'


def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object
    off of the wire or after receiving one as ackdata.
    Returns the length of time that we should reserve to process
    this message if we are receiving it off of the wire.
    """
    if len(data) > 2 ** 18:  # 256 KiB
        logger.info(
            'The payload length of this object is too large (%i bytes).'
            ' Ignoring it.', len(data)
        )
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not protocol.isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0

    endOfLifeTime, = unpack('>Q', data[8:16])
    # The TTL may not be larger than 28 days + 3 hours of wiggle room
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800:
        logger.info(
            'This object\'s End of Life time is too far in the future.'
            ' Ignoring it. Time is %s', endOfLifeTime
        )
        return 0
    # The EOL time was more than an hour ago. That's too much.
    if endOfLifeTime - int(time.time()) < -3600:
        logger.info(
            'This object\'s End of Life time was more than an hour ago.'
            ' Ignoring the object. Time is %s' % endOfLifeTime
        )
        return 0
    intObjectType, = unpack('>I', data[16:20])
    try:
        if intObjectType == 0:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 1:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 2:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == 3:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug(
            'There was a problem with a varint while checking'
            ' to see whether it was appropriate to share an object'
            ' with peers. Some details: %s' % e)
    except Exception:
        logger.critical(
            'There was a problem while checking to see whether it was'
            ' appropriate to share an object with peers. This is'
            ' definitely a bug! \n%s' % traceback.format_exc())
    return 0
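

# Each of the _checkAndShare*WithPeers helpers below follows the same
# pattern: parse the object header, ignore the object if its stream is
# not one we participate in or if it is already in our inventory,
# otherwise record it in the Inventory, advertise its hash to peers and,
# for the known object types, enqueue it for local processing.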


def _checkAndShareUndefinedObjectWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber
        )
        return

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this undefined object. Ignoring.')
        return
    objectType, = unpack('>I', data[16:20])
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))


def _checkAndShareMsgWithPeers(data):
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = \
        decodeVarint(data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = \
        decodeVarint(data[readPosition:readPosition + 9])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber
        )
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this msg message. Ignoring.')
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's enqueue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))


def _checkAndShareGetpubkeyWithPeers(data):
    if len(data) < 42:
        logger.info(
            'getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber
        )
        return
    readPosition += streamNumberLength

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this getpubkey request. Ignoring it.')
        return

    objectType = 0
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, '')
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))


def _checkAndSharePubkeyWithPeers(data):
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += varintLength
    if streamNumber not in state.streamsInWhichIAmParticipating:
        logger.debug(
            'The streamNumber %i isn\'t one we are interested in.',
            streamNumber
        )
        return
    if addressVersion >= 4:
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s', hexlify(tag))
    else:
        tag = ''

    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug('We have already received this pubkey. Ignoring it.')
        return
    objectType = 1
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))


def _checkAndShareBroadcastWithPeers(data):
    if len(data) < 180:
        logger.debug(
            'The payload length of this broadcast packet is unreasonably low.'
            ' Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = \
        decodeVarint(data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = \
            decodeVarint(data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug(
                'The streamNumber %i isn\'t one we are interested in.',
                streamNumber
            )
            return
    if broadcastVersion >= 3:
        tag = data[readPosition:readPosition + 32]
    else:
        tag = ''
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in Inventory():
        logger.debug(
            'We have already received this broadcast object. Ignoring.')
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    Inventory()[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s', hexlify(inventoryHash))
    protocol.broadcastToSendDataQueues(
        (streamNumber, 'advertiseobject', inventoryHash))

    # Now let's queue it to be processed ourselves.
    objectProcessorQueue.put((objectType, data))


def openKeysFile():
    if 'linux' in sys.platform:
        subprocess.call(["xdg-open", state.appdata + 'keys.dat'])
    elif sys.platform == 'darwin':
        # os.startfile exists only on Windows; use the standard macOS
        # opener instead.
        subprocess.call(["open", state.appdata + 'keys.dat'])
    else:
        os.startfile(state.appdata + 'keys.dat')