"""
The objectProcessor thread, of which there is only one, processes the network objects
"""

import hashlib
import logging
import random
import threading
import time
from binascii import hexlify
from subprocess import call  # nosec

import highlevelcrypto
import knownnodes
import shared
from addresses import (
    calculateInventoryHash, decodeAddress, decodeVarint, encodeAddress,
    encodeVarint, varintDecodeError
)
from bmconfigparser import BMConfigParser

import helper_bitcoin
import helper_inbox
import helper_msgcoding
import helper_sent
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery
from helper_ackPayload import genAckPayload
from network import bmproto
from network.node import Peer

import protocol
import queues
import state
import tr
from fallback import RIPEMD160Hash

import l10n

# pylint: disable=too-many-locals, too-many-return-statements
# pylint: disable=too-many-branches, too-many-statements

logger = logging.getLogger('default')

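# All handlers below parse Bitmessage network objects, which share a
# common prefix: an 8-byte proof-of-work nonce, an 8-byte expiration
# time and a 4-byte object type, followed by the type-specific payload.
# That is why most methods start with ``readPosition = 20`` (or 16 in
# checkackdata, where the object type itself is part of the bytes
# being matched).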
class objectProcessor(threading.Thread):
    """
    The objectProcessor thread, of which there is only one, receives network
    objects (msg, broadcast, pubkey, getpubkey) from the receiveDataThreads.
    """
    def __init__(self):
        threading.Thread.__init__(self, name="objectProcessor")
        random.seed()
        # It may be the case that the last time Bitmessage was running,
        # the user closed it before it finished processing everything in the
        # objectProcessorQueue. Assuming that Bitmessage wasn't closed
        # forcefully, it should have saved the data in the queue into the
        # objectprocessorqueue table. Let's pull it out.
        queryreturn = sqlQuery(
            '''SELECT objecttype, data FROM objectprocessorqueue''')
        for row in queryreturn:
            objectType, data = row
            queues.objectProcessorQueue.put((objectType, data))
        sqlExecute('''DELETE FROM objectprocessorqueue''')
        logger.debug(
            'Loaded %s objects from disk into the objectProcessorQueue.',
            len(queryreturn))
        self._ack_obj = bmproto.BMStringParser()
        self.successfullyDecryptMessageTimings = []

    def run(self):
        """Process the objects from `.queues.objectProcessorQueue`"""
        while True:
            objectType, data = queues.objectProcessorQueue.get()

            self.checkackdata(data)

            try:
                if objectType == protocol.OBJECT_GETPUBKEY:
                    self.processgetpubkey(data)
                elif objectType == protocol.OBJECT_PUBKEY:
                    self.processpubkey(data)
                elif objectType == protocol.OBJECT_MSG:
                    self.processmsg(data)
                elif objectType == protocol.OBJECT_BROADCAST:
                    self.processbroadcast(data)
                elif objectType == protocol.OBJECT_ONIONPEER:
                    self.processonion(data)
                # is more of a command, not an object type. Is used to get
                # this thread past the queue.get() so that it will check
                # the shutdown variable.
                elif objectType == 'checkShutdownVariable':
                    pass
                else:
                    if isinstance(objectType, int):
                        logger.info(
                            'Don\'t know how to handle object type 0x%08X',
                            objectType)
                    else:
                        logger.info(
                            'Don\'t know how to handle object type %s',
                            objectType)
            except helper_msgcoding.DecompressionSizeException as e:
                logger.error(
                    'The object is too big after decompression (stopped'
                    ' decompressing at %ib, your configured limit %ib).'
                    ' Ignoring',
                    e.size, BMConfigParser().safeGetInt("zlib", "maxsize"))
            except varintDecodeError as e:
                logger.debug(
                    'There was a problem with a varint while processing an'
                    ' object. Some details: %s', e)
            except Exception:
                logger.critical(
                    'Critical error within objectProcessorThread: \n',
                    exc_info=True)

            if state.shutdown:
                # Wait just a moment for most of the connections to close
                time.sleep(.5)
                numberOfObjectsThatWereInTheObjectProcessorQueue = 0
                with SqlBulkExecute() as sql:
                    while queues.objectProcessorQueue.curSize > 0:
                        objectType, data = queues.objectProcessorQueue.get()
                        sql.execute(
                            'INSERT INTO objectprocessorqueue VALUES (?,?)',
                            objectType, data)
                        numberOfObjectsThatWereInTheObjectProcessorQueue += 1
                logger.debug(
                    'Saved %s objects from the objectProcessorQueue to'
                    ' disk. objectProcessorThread exiting.',
                    numberOfObjectsThatWereInTheObjectProcessorQueue)
                state.shutdown = 2
                break

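    # Note: an acknowledgement is itself a complete network object that
    # the recipient assembles and broadcasts on our behalf, so matching
    # one is a byte-for-byte comparison against the ack payloads we
    # recorded in shared.ackdataForWhichImWatching when sending.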
    @staticmethod
    def checkackdata(data):
        """Check whether this object is an acknowledgement bound for us"""
        # pylint: disable=protected-access
        # Let's check whether this is a message acknowledgement bound for us.
        if len(data) < 32:
            return

        # bypass nonce and time, retain object type/version/stream + body
        readPosition = 16

        if bytes(data[readPosition:]) in shared.ackdataForWhichImWatching:
            logger.info('This object is an acknowledgement bound for me.')
            del shared.ackdataForWhichImWatching[bytes(data[readPosition:])]
            sqlExecute(
                'UPDATE sent SET status=?, lastactiontime=?'
                ' WHERE ackdata=?',
                'ackreceived', int(time.time()), data[readPosition:])
            queues.UISignalQueue.put((
                'updateSentItemStatusByAckdata',
                (
                    data[readPosition:],
                    tr._translate(
                        "MainWindow",
                        "Acknowledgement of the message received %1"
                    ).arg(l10n.formatTimestamp())
                )
            ))
        else:
            logger.debug('This object is not an acknowledgement bound for me.')

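    # An onionpeer object announces an onion-address peer. As parsed
    # below, the payload carries a leading varint (skipped), the stream
    # number, the port and finally the host, which
    # protocol.checkIPAddress() must recognise before the peer is added
    # to knownnodes.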
    @staticmethod
    def processonion(data):
        """Process onionpeer object"""
        readPosition = 20  # bypass the nonce, time, and object type
        length = decodeVarint(data[readPosition:readPosition + 10])[1]
        readPosition += length
        stream, length = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += length
        # it seems that stream is checked in network.bmproto
        port, length = decodeVarint(data[readPosition:readPosition + 10])
        host = protocol.checkIPAddress(data[readPosition + length:])

        if not host:
            return
        peer = Peer(host, port)
        with knownnodes.knownNodesLock:
            knownnodes.addKnownNode(
                stream, peer, is_self=state.ownAddresses.get(peer))

    @staticmethod
    def processgetpubkey(data):
        """Process getpubkey object"""
        if len(data) > 200:
            logger.info(
                'getpubkey is abnormally long. Sanity check failed.'
                ' Ignoring object.')
            return
        readPosition = 20  # bypass the nonce, time, and object type
        requestedAddressVersionNumber, addressVersionLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += addressVersionLength
        streamNumber, streamNumberLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += streamNumberLength

        if requestedAddressVersionNumber == 0:
            logger.debug(
                'The requestedAddressVersionNumber of the pubkey request'
                ' is zero. That doesn\'t make any sense. Ignoring it.')
            return
        elif requestedAddressVersionNumber == 1:
            logger.debug(
                'The requestedAddressVersionNumber of the pubkey request'
                ' is 1 which isn\'t supported anymore. Ignoring it.')
            return
        elif requestedAddressVersionNumber > 4:
            logger.debug(
                'The requestedAddressVersionNumber of the pubkey request'
                ' is too high. Can\'t understand. Ignoring it.')
            return

        myAddress = ''
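        # For address versions 2 and 3 the request carries the 20-byte
        # RIPE hash of the requested pubkey; version 4 requests carry a
        # 32-byte tag derived from the address instead (see
        # possibleNewPubkey below), so the bare hash never appears on
        # the wire.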
        if requestedAddressVersionNumber <= 3:
            requestedHash = data[readPosition:readPosition + 20]
            if len(requestedHash) != 20:
                logger.debug(
                    'The length of the requested hash is not 20 bytes.'
                    ' Something is wrong. Ignoring.')
                return
            logger.info(
                'the hash requested in this getpubkey request is: %s',
                hexlify(requestedHash))
            # if this address hash is one of mine
            if bytes(requestedHash) in shared.myAddressesByHash:
                myAddress = shared.myAddressesByHash[bytes(requestedHash)]
        elif requestedAddressVersionNumber >= 4:
            requestedTag = data[readPosition:readPosition + 32]
            if len(requestedTag) != 32:
                logger.debug(
                    'The length of the requested tag is not 32 bytes.'
                    ' Something is wrong. Ignoring.')
                return
            logger.debug(
                'the tag requested in this getpubkey request is: %s',
                hexlify(requestedTag))
            if bytes(requestedTag) in shared.myAddressesByTag:
                myAddress = shared.myAddressesByTag[bytes(requestedTag)]

        if myAddress == '':
            logger.info('This getpubkey request is not for any of my keys.')
            return

        if decodeAddress(myAddress)[1] != requestedAddressVersionNumber:
            logger.warning(
                '(Within the processgetpubkey function) Someone requested'
                ' one of my pubkeys but the requestedAddressVersionNumber'
                ' doesn\'t match my actual address version number.'
                ' Ignoring.')
            return
        if decodeAddress(myAddress)[2] != streamNumber:
            logger.warning(
                '(Within the processgetpubkey function) Someone requested'
                ' one of my pubkeys but the stream number on which we'
                ' heard this getpubkey object doesn\'t match this'
                ' address\' stream number. Ignoring.')
            return
        if BMConfigParser().safeGetBoolean(myAddress, 'chan'):
            logger.info(
                'Ignoring getpubkey request because it is for one of my'
                ' chan addresses. The other party should already have'
                ' the pubkey.')
            return
        lastPubkeySendTime = BMConfigParser().safeGetInt(
            myAddress, 'lastpubkeysendtime')
        # If the last time we sent our pubkey was more recent than
        # 28 days ago...
        if lastPubkeySendTime > time.time() - 2419200:
            logger.info(
                'Found getpubkey-requested-item in my list of EC hashes'
                ' BUT we already sent it recently. Ignoring request.'
                ' The lastPubkeySendTime is: %s', lastPubkeySendTime)
            return
        logger.info(
            'Found getpubkey-requested-hash in my list of EC hashes.'
            ' Telling Worker thread to do the POW for a pubkey message'
            ' and send it out.')
        if requestedAddressVersionNumber == 2:
            queues.workerQueue.put(('doPOWForMyV2Pubkey', requestedHash))
        elif requestedAddressVersionNumber == 3:
            queues.workerQueue.put(('sendOutOrStoreMyV3Pubkey', requestedHash))
        elif requestedAddressVersionNumber == 4:
            queues.workerQueue.put(('sendOutOrStoreMyV4Pubkey', myAddress))

    def processpubkey(self, data):
        """Process a pubkey object"""
        pubkeyProcessingStartTime = time.time()
        shared.numberOfPubkeysProcessed += 1
        queues.UISignalQueue.put((
            'updateNumberOfPubkeysProcessed', 'no data'))
        readPosition = 20  # bypass the nonce, time, and object type
        addressVersion, varintLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += varintLength
        streamNumber, varintLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += varintLength
        if addressVersion == 0:
            logger.debug(
                '(Within processpubkey) addressVersion of 0 doesn\'t'
                ' make sense.')
            return
        if addressVersion > 4 or addressVersion == 1:
            logger.info(
                'This version of Bitmessage cannot handle version %s'
                ' addresses.', addressVersion)
            return
        if addressVersion == 2:
            # sanity check. This is the minimum possible length.
            if len(data) < 146:
                logger.debug(
                    '(within processpubkey) payloadLength less than 146.'
                    ' Sanity check failed.')
                return
            readPosition += 4
            publicSigningKey = data[readPosition:readPosition + 64]
            # Is it possible for a public key to be invalid such that trying to
            # encrypt or sign with it will cause an error? If it is, it would
            # be easiest to test them here.
            readPosition += 64
            publicEncryptionKey = data[readPosition:readPosition + 64]
            if len(publicEncryptionKey) < 64:
                logger.debug(
                    'publicEncryptionKey length less than 64. Sanity check'
                    ' failed.')
                return
            readPosition += 64
            # The data we'll store in the pubkeys table.
            dataToStore = data[20:readPosition]
            sha = hashlib.new('sha512')
            sha.update(
                b'\x04' + publicSigningKey + b'\x04' + publicEncryptionKey)
            ripe = RIPEMD160Hash(sha.digest()).digest()

            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    'within recpubkey, addressVersion: %s, streamNumber: %s'
                    '\nripe %s\npublicSigningKey in hex: %s'
                    '\npublicEncryptionKey in hex: %s',
                    addressVersion, streamNumber, hexlify(ripe),
                    hexlify(publicSigningKey), hexlify(publicEncryptionKey)
                )

            address = encodeAddress(addressVersion, streamNumber, ripe)

            queryreturn = sqlQuery(
                "SELECT usedpersonally FROM pubkeys WHERE address=?"
                " AND usedpersonally='yes'", address)
            # if this pubkey is already in our database and if we have
            # used it personally:
            if queryreturn != []:
                logger.info(
                    'We HAVE used this pubkey personally. Updating time.')
                t = (address, addressVersion, dataToStore,
                     int(time.time()), 'yes')
            else:
                logger.info(
                    'We have NOT used this pubkey personally. Inserting'
                    ' in database.')
                t = (address, addressVersion, dataToStore,
                     int(time.time()), 'no')
            sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
            self.possibleNewPubkey(address)

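        # A v3 pubkey extends the v2 layout with the sender's POW
        # requirements (two varints) and an ECDSA signature over the
        # payload, which is verified below before the key is stored.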
        if addressVersion == 3:
            if len(data) < 170:  # sanity check.
                logger.warning(
                    '(within processpubkey) payloadLength less than 170.'
                    ' Sanity check failed.')
                return
            readPosition += 4
            publicSigningKey = b'\x04' + data[readPosition:readPosition + 64]
            readPosition += 64
            publicEncryptionKey = b'\x04' + data[readPosition:readPosition + 64]
            readPosition += 64
            _, specifiedNonceTrialsPerByteLength = decodeVarint(
                data[readPosition:readPosition + 10])
            readPosition += specifiedNonceTrialsPerByteLength
            _, specifiedPayloadLengthExtraBytesLength = decodeVarint(
                data[readPosition:readPosition + 10])
            readPosition += specifiedPayloadLengthExtraBytesLength
            endOfSignedDataPosition = readPosition
            # The data we'll store in the pubkeys table.
            dataToStore = data[20:readPosition]
            signatureLength, signatureLengthLength = decodeVarint(
                data[readPosition:readPosition + 10])
            readPosition += signatureLengthLength
            signature = bytes(data[readPosition:readPosition + signatureLength])
            if highlevelcrypto.verify(
                    bytes(data[8:endOfSignedDataPosition]),
                    signature, hexlify(publicSigningKey)):
                logger.debug('ECDSA verify passed (within processpubkey)')
            else:
                logger.warning('ECDSA verify failed (within processpubkey)')
                return

            sha = hashlib.new('sha512')
            sha.update(publicSigningKey + publicEncryptionKey)
            ripe = RIPEMD160Hash(sha.digest()).digest()

            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    'within recpubkey, addressVersion: %s, streamNumber: %s'
                    '\nripe %s\npublicSigningKey in hex: %s'
                    '\npublicEncryptionKey in hex: %s',
                    addressVersion, streamNumber, hexlify(ripe),
                    hexlify(publicSigningKey), hexlify(publicEncryptionKey)
                )

            address = encodeAddress(addressVersion, streamNumber, ripe)
            queryreturn = sqlQuery(
                "SELECT usedpersonally FROM pubkeys WHERE address=?"
                " AND usedpersonally='yes'", address)
            # if this pubkey is already in our database and if we have
            # used it personally:
            if queryreturn != []:
                logger.info(
                    'We HAVE used this pubkey personally. Updating time.')
                t = (address, addressVersion, dataToStore,
                     int(time.time()), 'yes')
            else:
                logger.info(
                    'We have NOT used this pubkey personally. Inserting'
                    ' in database.')
                t = (address, addressVersion, dataToStore,
                     int(time.time()), 'no')
            sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
            self.possibleNewPubkey(address)

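        # A v4 pubkey is encrypted with a key derived from the address it
        # belongs to, so the only cleartext identifier is the 32-byte tag.
        # We can therefore only make use of one we explicitly asked for,
        # i.e. one whose tag sits in state.neededPubkeys.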
        if addressVersion == 4:
            if len(data) < 350:  # sanity check.
                logger.debug(
                    '(within processpubkey) payloadLength less than 350.'
                    ' Sanity check failed.')
                return

            tag = data[readPosition:readPosition + 32]
            if bytes(tag) not in state.neededPubkeys:
                logger.info(
                    'We don\'t need this v4 pubkey. We didn\'t ask for it.')
                return

            # Let us try to decrypt the pubkey
            toAddress, _ = state.neededPubkeys[bytes(tag)]
            if protocol.decryptAndCheckPubkeyPayload(bytes(data), toAddress) == \
                    'successful':
                # At this point we know that we have been waiting on this
                # pubkey. This function will command the workerThread
                # to start work on the messages that require it.
                self.possibleNewPubkey(toAddress)

        # Display timing data
        timeRequiredToProcessPubkey = time.time(
        ) - pubkeyProcessingStartTime
        logger.debug(
            'Time required to process this pubkey: %s',
            timeRequiredToProcessPubkey)

    def processmsg(self, data):
        """Process a message object"""
        messageProcessingStartTime = time.time()
        shared.numberOfMessagesProcessed += 1
        queues.UISignalQueue.put((
            'updateNumberOfMessagesProcessed', 'no data'))
        readPosition = 20  # bypass the nonce, time, and object type
        msgVersion, msgVersionLength = decodeVarint(
            data[readPosition:readPosition + 9])
        if msgVersion != 1:
            logger.info(
                'Cannot understand message versions other than one.'
                ' Ignoring message.')
            return
        readPosition += msgVersionLength

        streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = \
            decodeVarint(data[readPosition:readPosition + 9])
        readPosition += streamNumberAsClaimedByMsgLength
        inventoryHash = calculateInventoryHash(data)
        initialDecryptionSuccessful = False

        # This is not an acknowledgement bound for me. See if it is a message
        # bound for me by trying to decrypt it with my private keys.

        for key, cryptorObject in sorted(
                shared.myECCryptorObjects.items(),
                key=lambda x: random.random()):
            try:
                # continue decryption attempts to avoid timing attacks
                if initialDecryptionSuccessful:
                    cryptorObject.decrypt(data[readPosition:])
                else:
                    decryptedData = cryptorObject.decrypt(data[readPosition:])
                    # This is the RIPE hash of my pubkeys. We need this
                    # below to compare to the destination_ripe included
                    # in the encrypted data.
                    toRipe = key
                    initialDecryptionSuccessful = True
                    logger.info(
                        'EC decryption successful using key associated'
                        ' with ripe hash: %s.', hexlify(key))
            except Exception:
                pass
        if not initialDecryptionSuccessful:
            # This is not a message bound for me.
            logger.info(
                'Length of time program spent failing to decrypt this'
                ' message: %s seconds.',
                time.time() - messageProcessingStartTime)
            return

        # This is a message bound for me.
        # Look up my address based on the RIPE hash.
        toAddress = shared.myAddressesByHash[toRipe]
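        # Layout of the decrypted payload, as parsed below: sender address
        # version (varint), sender stream (varint), a 4-byte behavior
        # bitfield, 64-byte public signing key, 64-byte public encryption
        # key, the sender's POW requirements (v3+ senders only), the
        # 20-byte destination RIPE hash, encoding type, message, ackdata,
        # and finally a signature covering everything up to the end of
        # the ackdata.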
        readPosition = 0
        sendersAddressVersionNumber, sendersAddressVersionNumberLength = \
            decodeVarint(decryptedData[readPosition:readPosition + 10])
        readPosition += sendersAddressVersionNumberLength
        if sendersAddressVersionNumber == 0:
            logger.info(
                'Cannot understand sendersAddressVersionNumber = 0.'
                ' Ignoring message.')
            return
        if sendersAddressVersionNumber > 4:
            logger.info(
                'Sender\'s address version number %s not yet supported.'
                ' Ignoring message.', sendersAddressVersionNumber)
            return
        if len(decryptedData) < 170:
            logger.info(
                'Length of the unencrypted data is unreasonably short.'
                ' Sanity check failed. Ignoring message.')
            return
        sendersStreamNumber, sendersStreamNumberLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        if sendersStreamNumber == 0:
            logger.info('sender\'s stream number is 0. Ignoring message.')
            return
        readPosition += sendersStreamNumberLength
        readPosition += 4
        pubSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        pubEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        if sendersAddressVersionNumber >= 3:
            requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = \
                decodeVarint(decryptedData[readPosition:readPosition + 10])
            readPosition += varintLength
            logger.info(
                'sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is %s',
                requiredAverageProofOfWorkNonceTrialsPerByte)
            requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
                decryptedData[readPosition:readPosition + 10])
            readPosition += varintLength
            logger.info(
                'sender\'s requiredPayloadLengthExtraBytes is %s',
                requiredPayloadLengthExtraBytes)
        # needed for when we store the pubkey in our database of pubkeys
        # for later use.
        endOfThePublicKeyPosition = readPosition
        if toRipe != decryptedData[readPosition:readPosition + 20]:
            logger.info(
                'The original sender of this message did not send it to'
                ' you. Someone is attempting a Surreptitious Forwarding'
                ' Attack.\nSee: '
                'http://world.std.com/~dtd/sign_encrypt/sign_encrypt7.html'
                '\nyour toRipe: %s\nembedded destination toRipe: %s',
                hexlify(toRipe),
                hexlify(decryptedData[readPosition:readPosition + 20])
            )
            return
        readPosition += 20
        messageEncodingType, messageEncodingTypeLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += messageEncodingTypeLength
        messageLength, messageLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += messageLengthLength
        message = decryptedData[readPosition:readPosition + messageLength]
        readPosition += messageLength
        ackLength, ackLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += ackLengthLength
        ackData = decryptedData[readPosition:readPosition + ackLength]
        readPosition += ackLength
        # needed to mark the end of what is covered by the signature
        positionOfBottomOfAckData = readPosition
        signatureLength, signatureLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += signatureLengthLength
        signature = decryptedData[
            readPosition:readPosition + signatureLength]
        signedData = data[8:20] + encodeVarint(1) + encodeVarint(
            streamNumberAsClaimedByMsg
        ) + decryptedData[:positionOfBottomOfAckData]

        if not highlevelcrypto.verify(
                signedData, signature, hexlify(pubSigningKey)):
            logger.debug('ECDSA verify failed')
            return
        logger.debug('ECDSA verify passed')
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(
                'As a matter of intellectual curiosity, here is the Bitcoin'
                ' address associated with the keys owned by the other person:'
                ' %s ..and here is the testnet address: %s. The other person'
                ' must take their private signing key from Bitmessage and'
                ' import it into Bitcoin (or a service like Blockchain.info)'
                ' for it to be of any use. Do not use this unless you know'
                ' what you are doing.',
                helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey),
                helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey)
            )
        # Used to detect and ignore duplicate messages in our inbox
        sigHash = hashlib.sha512(
            hashlib.sha512(signature).digest()).digest()[32:]

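        # The sender's address can be reconstructed entirely from the two
        # embedded public keys: encodeAddress(version, stream,
        # RIPEMD160(SHA512(pubSigningKey + pubEncryptionKey))), which is
        # exactly what the next few lines do.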
        # calculate the fromRipe.
        sha = hashlib.new('sha512')
        sha.update(pubSigningKey + pubEncryptionKey)
        ripe = RIPEMD160Hash(sha.digest()).digest()
        fromAddress = encodeAddress(
            sendersAddressVersionNumber, sendersStreamNumber, ripe)

        # Let's store the public key in case we want to reply to this
        # person.
        sqlExecute(
            '''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
            fromAddress,
            sendersAddressVersionNumber,
            decryptedData[:endOfThePublicKeyPosition],
            int(time.time()),
            'yes')

        # Check to see whether we happen to be awaiting this
        # pubkey in order to send a message. If we are, it will do the POW
        # and send it.
        self.possibleNewPubkey(fromAddress)

        # If this message is bound for one of my version 3 addresses (or
        # higher), then we must check to make sure it meets our demanded
        # proof of work requirement. If this is bound for one of my chan
        # addresses then we skip this check; the minimum network POW is
        # fine.
        # If the toAddress version number is 3 or higher and not one of
        # my chan addresses:
        if decodeAddress(toAddress)[1] >= 3 \
                and not BMConfigParser().safeGetBoolean(toAddress, 'chan'):
            # If I'm not friendly with this person:
            if not shared.isAddressInMyAddressBookSubscriptionsListOrWhitelist(
                    fromAddress):
                requiredNonceTrialsPerByte = BMConfigParser().getint(
                    toAddress, 'noncetrialsperbyte')
                requiredPayloadLengthExtraBytes = BMConfigParser().getint(
                    toAddress, 'payloadlengthextrabytes')
                if not protocol.isProofOfWorkSufficient(
                        data, requiredNonceTrialsPerByte,
                        requiredPayloadLengthExtraBytes):
                    logger.info(
                        'Proof of work in msg is insufficient only because'
                        ' it does not meet our higher requirement.')
                    return
        # Gets set to True if the user shouldn't see the message according
        # to black or white lists.
        blockMessage = False
        # If we are using a blacklist
        if BMConfigParser().get(
                'bitmessagesettings', 'blackwhitelist') == 'black':
            queryreturn = sqlQuery(
                "SELECT label FROM blacklist where address=? and enabled='1'",
                fromAddress)
            if queryreturn != []:
                logger.info('Message ignored because address is in blacklist.')

                blockMessage = True
        else:  # We're using a whitelist
            queryreturn = sqlQuery(
                "SELECT label FROM whitelist where address=? and enabled='1'",
                fromAddress)
            if queryreturn == []:
                logger.info(
                    'Message ignored because address not in whitelist.')
                blockMessage = True

        toLabel = BMConfigParser().get(toAddress, 'label')
        if toLabel == '':
            toLabel = toAddress

        try:
            decodedMessage = helper_msgcoding.MsgDecode(
                messageEncodingType, message)
        except helper_msgcoding.MsgDecodeException:
            return
        subject = decodedMessage.subject
        body = decodedMessage.body

        # Let us make sure that we haven't already received this message
        if helper_inbox.isMessageAlreadyInInbox(sigHash):
            logger.info('This msg is already in our inbox. Ignoring it.')
            blockMessage = True
        if not blockMessage:
            if messageEncodingType != 0:
                t = (inventoryHash, toAddress, fromAddress, subject,
                     int(time.time()), body, 'inbox', messageEncodingType,
                     0, sigHash)
                helper_inbox.insert(t)

                queues.UISignalQueue.put(('displayNewInboxMessage', (
                    inventoryHash, toAddress, fromAddress, subject, body)))

            # If we are behaving as an API then we might need to run an
            # outside command to let some program know that a new message
            # has arrived.
            if BMConfigParser().safeGetBoolean(
                    'bitmessagesettings', 'apienabled'):
                try:
                    apiNotifyPath = BMConfigParser().get(
                        'bitmessagesettings', 'apinotifypath')
                except Exception:
                    apiNotifyPath = ''
                if apiNotifyPath != '':
                    call([apiNotifyPath, "newMessage"])

            # Let us now check and see whether our receiving address is
            # behaving as a mailing list
            if BMConfigParser().safeGetBoolean(toAddress, 'mailinglist') \
                    and messageEncodingType != 0:
                try:
                    mailingListName = BMConfigParser().get(
                        toAddress, 'mailinglistname')
                except Exception:
                    mailingListName = ''
                # Let us send out this message as a broadcast
                subject = self.addMailingListNameToSubject(
                    subject, mailingListName)
                # Let us now send this message out as a broadcast
                message = time.strftime(
                    "%a, %Y-%m-%d %H:%M:%S UTC", time.gmtime()
                ) + ' Message ostensibly from ' + fromAddress \
                    + ':\n\n' + body
                # The fromAddress for the broadcast that we are about to
                # send is the toAddress (my address) for the msg message
                # we are currently processing.
                fromAddress = toAddress
                # We don't actually need the ackdata for acknowledgement
                # since this is a broadcast message but we can use it to
                # update the user interface when the POW is done generating.
                streamNumber = decodeAddress(fromAddress)[2]

                ackdata = genAckPayload(streamNumber, 0)
                toAddress = '[Broadcast subscribers]'
                ripe = ''

                # We really should have a discussion about how to
                # set the TTL for mailing list broadcasts. This is obviously
                # hard-coded.
                TTL = 2 * 7 * 24 * 60 * 60  # 2 weeks
                t = ('',
                     toAddress,
                     ripe,
                     fromAddress,
                     subject,
                     message,
                     ackdata,
                     int(time.time()),  # sentTime (this doesn't change)
                     int(time.time()),  # lastActionTime
                     0,
                     'broadcastqueued',
                     0,
                     'sent',
                     messageEncodingType,
                     TTL)
                helper_sent.insert(t)

                queues.UISignalQueue.put((
                    'displayNewSentMessage', (
                        toAddress, '[Broadcast subscribers]', fromAddress,
                        subject, message, ackdata)
                ))
                queues.workerQueue.put(('sendbroadcast', ''))

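        # The embedded ackData is a complete, ready-made network message:
        # a 24-byte header (magic, command, payload length, checksum)
        # followed by an object payload on which the original sender
        # already did the POW. ackDataHasAValidHeader() checks that
        # header; if it is sane, we strip it and hand just the object
        # payload to the network layer for broadcasting.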
        # Don't send ACK if invalid, blacklisted senders, invisible
        # messages, disabled or chan
        if (
                self.ackDataHasAValidHeader(ackData) and not blockMessage and
                messageEncodingType != 0 and
                not BMConfigParser().safeGetBoolean(toAddress, 'dontsendack') and
                not BMConfigParser().safeGetBoolean(toAddress, 'chan')
        ):
            self._ack_obj.send_data(ackData[24:])

        # Display timing data
        timeRequiredToAttemptToDecryptMessage = time.time(
        ) - messageProcessingStartTime
        self.successfullyDecryptMessageTimings.append(
            timeRequiredToAttemptToDecryptMessage)
        timing_sum = 0
        for item in self.successfullyDecryptMessageTimings:
            timing_sum += item
        logger.debug(
            'Time to decrypt this message successfully: %s'
            '\nAverage time for all message decryption successes since'
            ' startup: %s.',
            timeRequiredToAttemptToDecryptMessage,
            timing_sum / len(self.successfullyDecryptMessageTimings)
        )

    def processbroadcast(self, data):
        """Process a broadcast object"""
        messageProcessingStartTime = time.time()
        shared.numberOfBroadcastsProcessed += 1
        queues.UISignalQueue.put((
            'updateNumberOfBroadcastsProcessed', 'no data'))
        inventoryHash = calculateInventoryHash(data)
        readPosition = 20  # bypass the nonce, time, and object type
        broadcastVersion, broadcastVersionLength = decodeVarint(
            data[readPosition:readPosition + 9])
        readPosition += broadcastVersionLength
        if broadcastVersion < 4 or broadcastVersion > 5:
            logger.info(
                'Cannot decode incoming broadcast versions less than 4'
                ' or higher than 5. Assuming the sender isn\'t being silly,'
                ' you should upgrade Bitmessage because this message shall'
                ' be ignored.'
            )
            return
        cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
            data[readPosition:readPosition + 10])
        readPosition += cleartextStreamNumberLength
        if broadcastVersion == 4:
            # v4 broadcasts are encrypted the same way the msgs are
            # encrypted. To see if we are interested in a v4 broadcast,
            # we try to decrypt it. This was replaced with v5 broadcasts
            # which include a tag which we check instead, just like we do
            # with v4 pubkeys.
            signedData = data[8:readPosition]
            initialDecryptionSuccessful = False
            for key, cryptorObject in sorted(
                    shared.MyECSubscriptionCryptorObjects.items(),
                    key=lambda x: random.random()):
                try:
                    # continue decryption attempts to avoid timing attacks
                    if initialDecryptionSuccessful:
                        cryptorObject.decrypt(data[readPosition:])
                    else:
                        decryptedData = cryptorObject.decrypt(
                            data[readPosition:])
                        # This is the RIPE hash of the sender's pubkey.
                        # We need this below to compare to the RIPE hash
                        # of the sender's address to verify that it was
                        # encrypted with their key rather than some
                        # other key.
                        toRipe = key
                        initialDecryptionSuccessful = True
                        logger.info(
                            'EC decryption successful using key associated'
                            ' with ripe hash: %s', hexlify(key))
                except Exception:
                    logger.debug(
                        'cryptorObject.decrypt Exception:', exc_info=True)
            if not initialDecryptionSuccessful:
                # This is not a broadcast I am interested in.
                logger.debug(
                    'Length of time program spent failing to decrypt this'
                    ' v4 broadcast: %s seconds.',
                    time.time() - messageProcessingStartTime)
                return
        elif broadcastVersion == 5:
            embeddedTag = data[readPosition:readPosition + 32]
            readPosition += 32
            if bytes(embeddedTag) not in shared.MyECSubscriptionCryptorObjects:
                logger.debug('We\'re not interested in this broadcast.')
                return
            # We are interested in this broadcast because of its tag.
            # We're going to add some more data which is signed further down.
            signedData = data[8:readPosition]
            cryptorObject = shared.MyECSubscriptionCryptorObjects[
                bytes(embeddedTag)]
            try:
                decryptedData = cryptorObject.decrypt(data[readPosition:])
                logger.debug('EC decryption successful')
            except Exception:
                logger.debug(
                    'Broadcast version %s decryption Unsuccessful.',
                    broadcastVersion)
                return
        # At this point this is a broadcast I have decrypted and am
        # interested in.
        readPosition = 0
        sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
            decryptedData[readPosition:readPosition + 9])
        if broadcastVersion == 4:
            if sendersAddressVersion < 2 or sendersAddressVersion > 3:
                logger.warning(
                    'Cannot decode senderAddressVersion other than 2 or 3.'
                    ' Assuming the sender isn\'t being silly, you should'
                    ' upgrade Bitmessage because this message shall be'
                    ' ignored.'
                )
                return
        elif broadcastVersion == 5:
            if sendersAddressVersion < 4:
                logger.info(
                    'Cannot decode senderAddressVersion less than 4 for'
                    ' broadcast version number 5. Assuming the sender'
                    ' isn\'t being silly, you should upgrade Bitmessage'
                    ' because this message shall be ignored.'
                )
                return
        readPosition += sendersAddressVersionLength
        sendersStream, sendersStreamLength = decodeVarint(
            decryptedData[readPosition:readPosition + 9])
        if sendersStream != cleartextStreamNumber:
            logger.info(
                'The stream number outside of the encryption on which the'
                ' POW was completed doesn\'t match the stream number'
                ' inside the encryption. Ignoring broadcast.'
            )
            return
        readPosition += sendersStreamLength
        readPosition += 4
        sendersPubSigningKey = '\x04' + \
            decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        sendersPubEncryptionKey = '\x04' + \
            decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        if sendersAddressVersion >= 3:
            requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = \
                decodeVarint(decryptedData[readPosition:readPosition + 10])
            readPosition += varintLength
            logger.debug(
                'sender\'s requiredAverageProofOfWorkNonceTrialsPerByte'
                ' is %s', requiredAverageProofOfWorkNonceTrialsPerByte)
            requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
                decryptedData[readPosition:readPosition + 10])
            readPosition += varintLength
            logger.debug(
                'sender\'s requiredPayloadLengthExtraBytes is %s',
                requiredPayloadLengthExtraBytes)
        endOfPubkeyPosition = readPosition

        sha = hashlib.new('sha512')
        sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
        calculatedRipe = RIPEMD160Hash(sha.digest()).digest()

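        # A v5 broadcast tag is the second half of a double SHA512 over
        # the sender's address data:
        #   doubleHash = sha512(sha512(encodeVarint(addressVersion)
        #       + encodeVarint(streamNumber) + ripe).digest()).digest()
        #   tag = doubleHash[32:]
        # (the first 32 bytes of the same digest seed the broadcast
        # encryption key, which is why knowing the address alone is
        # enough to subscribe). Recomputing the tag from the embedded
        # keys and comparing it to the cleartext tag proves that the two
        # belong together.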
        if broadcastVersion == 4:
            if toRipe != calculatedRipe:
                logger.info(
                    'The encryption key used to encrypt this message'
                    ' doesn\'t match the keys embedded in the message'
                    ' itself. Ignoring message.'
                )
                return
        elif broadcastVersion == 5:
            calculatedTag = hashlib.sha512(hashlib.sha512(
                encodeVarint(sendersAddressVersion) +
                encodeVarint(sendersStream) + calculatedRipe
            ).digest()).digest()[32:]
            if calculatedTag != embeddedTag:
                logger.debug(
                    'The tag and encryption key used to encrypt this'
                    ' message doesn\'t match the keys embedded in the'
                    ' message itself. Ignoring message.'
                )
                return
        messageEncodingType, messageEncodingTypeLength = decodeVarint(
            decryptedData[readPosition:readPosition + 9])
        if messageEncodingType == 0:
            return
        readPosition += messageEncodingTypeLength
        messageLength, messageLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 9])
        readPosition += messageLengthLength
        message = decryptedData[readPosition:readPosition + messageLength]
        readPosition += messageLength
        readPositionAtBottomOfMessage = readPosition
        signatureLength, signatureLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 9])
        readPosition += signatureLengthLength
        signature = decryptedData[
            readPosition:readPosition + signatureLength]
        signedData += decryptedData[:readPositionAtBottomOfMessage]
        if not highlevelcrypto.verify(
                signedData, signature, hexlify(sendersPubSigningKey)):
            logger.debug('ECDSA verify failed')
            return
        logger.debug('ECDSA verify passed')
        # Used to detect and ignore duplicate messages in our inbox
        sigHash = hashlib.sha512(
            hashlib.sha512(signature).digest()).digest()[32:]

        fromAddress = encodeAddress(
            sendersAddressVersion, sendersStream, calculatedRipe)
        logger.info('fromAddress: %s', fromAddress)

        # Let's store the public key in case we want to reply to this person.
        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
                   fromAddress,
                   sendersAddressVersion,
                   decryptedData[:endOfPubkeyPosition],
                   int(time.time()),
                   'yes')

        # Check to see whether we happen to be awaiting this
        # pubkey in order to send a message. If we are, it will do the POW
        # and send it.
        self.possibleNewPubkey(fromAddress)

        try:
            decodedMessage = helper_msgcoding.MsgDecode(
                messageEncodingType, message)
        except helper_msgcoding.MsgDecodeException:
            return
        subject = decodedMessage.subject
        body = decodedMessage.body

        toAddress = '[Broadcast subscribers]'
        if helper_inbox.isMessageAlreadyInInbox(sigHash):
            logger.info('This broadcast is already in our inbox. Ignoring it.')
            return
        t = (inventoryHash, toAddress, fromAddress, subject, int(
            time.time()), body, 'inbox', messageEncodingType, 0, sigHash)
        helper_inbox.insert(t)

        queues.UISignalQueue.put(('displayNewInboxMessage', (
            inventoryHash, toAddress, fromAddress, subject, body)))

        # If we are behaving as an API then we might need to run an
        # outside command to let some program know that a new message
        # has arrived.
        if BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'):
            try:
                apiNotifyPath = BMConfigParser().get(
                    'bitmessagesettings', 'apinotifypath')
            except Exception:
                apiNotifyPath = ''
            if apiNotifyPath != '':
                call([apiNotifyPath, "newBroadcast"])

        # Display timing data
        logger.info(
            'Time spent processing this interesting broadcast: %s',
            time.time() - messageProcessingStartTime)

    def possibleNewPubkey(self, address):
        """
        We have inserted a pubkey into our pubkey table which we received
        from a pubkey, msg, or broadcast message. It might be one that we
        have been waiting for. Let's check.
        """

        # For address versions <= 3, we wait on a key with the correct
        # address version, stream number and RIPE hash.
        _, addressVersion, streamNumber, ripe = decodeAddress(address)
        if addressVersion <= 3:
            if address in state.neededPubkeys:
                del state.neededPubkeys[address]
                self.sendMessages(address)
            else:
                logger.debug(
                    'We don\'t need this pub key. We didn\'t ask for it.'
                    ' For address: %s', address)
        # For address versions >= 4, we wait on a pubkey with the correct tag.
        # Let us create the tag from the address and see if we were waiting
        # for it.
        elif addressVersion >= 4:
            tag = hashlib.sha512(hashlib.sha512(
                encodeVarint(addressVersion) + encodeVarint(streamNumber)
                + ripe
            ).digest()).digest()[32:]
            if tag in state.neededPubkeys:
                del state.neededPubkeys[tag]
                self.sendMessages(address)

    @staticmethod
    def sendMessages(address):
        """
        This method is called by `possibleNewPubkey` when it sees
        that we now have the necessary pubkey to send one or more messages.
        """
        logger.info('We have been awaiting the arrival of this pubkey.')
        sqlExecute(
            "UPDATE sent SET status='doingmsgpow', retrynumber=0"
            " WHERE toaddress=?"
            " AND (status='awaitingpubkey' OR status='doingpubkeypow')"
            " AND folder='sent'", address)
        queues.workerQueue.put(('sendmessage', ''))

    @staticmethod
    def ackDataHasAValidHeader(ackData):
        """Check that ackData carries a valid message header; if not, don't send it"""
        if len(ackData) < protocol.Header.size:
            logger.info(
                'The length of ackData is unreasonably short. Not sending'
                ' ackData.')
            return False

        magic, command, payloadLength, checksum = protocol.Header.unpack(
            ackData[:protocol.Header.size])
        if magic != 0xE9BEB4D9:
            logger.info('Ackdata magic bytes were wrong. Not sending ackData.')
            return False
        payload = ackData[protocol.Header.size:]
        if len(payload) != payloadLength:
            logger.info(
                'ackData payload length doesn\'t match the payload length'
                ' specified in the header. Not sending ackdata.')
            return False
        # ~1.6 MB which is the maximum possible size of an inv message.
        if payloadLength > 1600100:
            # The largest message should be either an inv or a getdata
            # message at 1.6 MB in size.
            # That doesn't mean that the object may be that big. The
            # shared.checkAndShareObjectWithPeers function will verify
            # that it is no larger than 2^18 bytes.
            return False
        # test the checksum in the message.
        if checksum != hashlib.sha512(payload).digest()[0:4]:
            logger.info('ackdata checksum wrong. Not sending ackdata.')
            return False
        command = command.rstrip('\x00')
        if command != 'object':
            return False
        return True

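    # A small illustration of the subject rewriting done for mailing
    # lists (hypothetical values, shown only as documentation):
    #   addMailingListNameToSubject('Re: hello', 'mylist')
    #   -> '[mylist] hello'
    #   addMailingListNameToSubject('[mylist] hello', 'mylist')
    #   -> '[mylist] hello'   (already tagged, returned unchanged)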
    @staticmethod
    def addMailingListNameToSubject(subject, mailingListName):
        """Prepend [mailingListName] to the subject unless it's already there"""
        subject = subject.strip()
        if subject[:3] == 'Re:' or subject[:3] == 'RE:':
            subject = subject[3:].strip()
        if '[' + mailingListName + ']' in subject:
            return subject
        return '[' + mailingListName + '] ' + subject