"""
Thread for performing PoW
"""
|
2020-01-15 11:47:26 +01:00
|
|
|
# pylint: disable=protected-access,too-many-branches,too-many-statements
|
|
|
|
# pylint: disable=no-self-use,too-many-lines,too-many-locals
|
2018-10-10 12:49:48 +02:00
|
|
|
|
2015-01-21 18:38:25 +01:00
|
|
|
from __future__ import division
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
import hashlib
|
2018-10-10 12:49:48 +02:00
|
|
|
import time
|
|
|
|
from binascii import hexlify, unhexlify
|
2017-09-21 17:24:51 +02:00
|
|
|
from struct import pack
|
2018-05-02 17:29:55 +02:00
|
|
|
from subprocess import call # nosec
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2018-10-10 12:49:48 +02:00
|
|
|
import defaults
|
|
|
|
import helper_inbox
|
|
|
|
import helper_msgcoding
|
|
|
|
import helper_random
|
2021-01-01 13:51:35 +01:00
|
|
|
import helper_sql
|
2018-10-10 12:49:48 +02:00
|
|
|
import highlevelcrypto
|
2014-08-06 04:01:01 +02:00
|
|
|
import l10n
|
2018-10-10 12:49:48 +02:00
|
|
|
import proofofwork
|
2017-01-11 14:27:19 +01:00
|
|
|
import protocol
|
2017-02-08 13:41:56 +01:00
|
|
|
import queues
|
2017-09-21 17:24:51 +02:00
|
|
|
import shared
|
2018-10-10 12:49:48 +02:00
|
|
|
import state
|
|
|
|
import tr
|
2020-01-17 14:27:36 +01:00
|
|
|
from addresses import (
|
|
|
|
calculateInventoryHash, decodeAddress, decodeVarint, encodeVarint
|
|
|
|
)
|
2022-01-28 13:55:23 +01:00
|
|
|
from bmconfigparser import config
|
2018-10-10 12:49:48 +02:00
|
|
|
from helper_sql import sqlExecute, sqlQuery
|
|
|
|
from inventory import Inventory
|
2020-07-16 16:05:32 +02:00
|
|
|
from network import knownnodes, StoppableThread
|
2021-08-16 17:43:44 +02:00
|
|
|
from six.moves import configparser, queue
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
|
2015-11-26 02:38:55 +01:00
|
|
|
def sizeof_fmt(num, suffix='h/s'):
    """
    Format hashes per second nicely (SI prefix).

    :param num: the raw rate in hashes per second
    :param suffix: unit string appended after the SI prefix
    :return: a human readable string such as ``"1.5kh/s"``
    """
    # SI prefixes (k, M, G, ...) step by powers of 1000. The previous
    # implementation divided by 1024 (a binary "Ki/Mi" step) and fell back
    # to the binary prefix 'Yi', which contradicted both the SI unit
    # letters used here and the docstring.
    for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1000.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1000.0
    return "%.1f%s%s" % (num, 'Y', suffix)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2019-08-01 13:37:26 +02:00
|
|
|
class singleWorker(StoppableThread):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""Thread for performing PoW"""
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
    def __init__(self):
        # Name the thread "singleWorker" so it is identifiable in logs.
        super(singleWorker, self).__init__(name="singleWorker")
        # Prepare the proof-of-work module before any work items arrive.
        proofofwork.init()
|
2015-11-24 01:55:17 +01:00
|
|
|
|
|
|
|
    def stopThread(self):
        """Signal through the queue that the thread should be stopped"""

        try:
            # Poison pill: run() returns when it dequeues this command.
            queues.workerQueue.put(("stopThread", "data"))
        except queue.Full:
            # The queue is wedged; still request a stop via the parent
            # class below so the thread can exit.
            self.logger.error('workerQueue is Full')
        super(singleWorker, self).stopThread()
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
def run(self):
|
2018-10-10 12:49:48 +02:00
|
|
|
# pylint: disable=attribute-defined-outside-init
|
2017-02-26 20:44:56 +01:00
|
|
|
|
2021-01-01 13:51:35 +01:00
|
|
|
while not helper_sql.sql_ready.wait(1.0) and state.shutdown == 0:
|
|
|
|
self.stop.wait(1.0)
|
2017-02-26 20:44:56 +01:00
|
|
|
if state.shutdown > 0:
|
|
|
|
return
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2015-03-09 07:35:32 +01:00
|
|
|
# Initialize the neededPubkeys dictionary.
|
2013-08-29 13:27:30 +02:00
|
|
|
queryreturn = sqlQuery(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''SELECT DISTINCT toaddress FROM sent'''
|
|
|
|
''' WHERE (status='awaitingpubkey' AND folder='sent')''')
|
2013-06-21 23:32:22 +02:00
|
|
|
for row in queryreturn:
|
2014-08-27 09:14:32 +02:00
|
|
|
toAddress, = row
|
2018-03-22 12:23:36 +01:00
|
|
|
# toStatus
|
|
|
|
_, toAddressVersionNumber, toStreamNumber, toRipe = \
|
2017-09-21 17:24:51 +02:00
|
|
|
decodeAddress(toAddress)
|
|
|
|
if toAddressVersionNumber <= 3:
|
2017-01-14 23:20:15 +01:00
|
|
|
state.neededPubkeys[toAddress] = 0
|
2013-09-13 06:27:34 +02:00
|
|
|
elif toAddressVersionNumber >= 4:
|
2017-09-21 17:24:51 +02:00
|
|
|
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(toAddressVersionNumber)
|
|
|
|
+ encodeVarint(toStreamNumber) + toRipe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()
|
|
|
|
# Note that this is the first half of the sha512 hash.
|
|
|
|
privEncryptionKey = doubleHashOfAddressData[:32]
|
2013-09-15 03:06:26 +02:00
|
|
|
tag = doubleHashOfAddressData[32:]
|
2017-09-21 17:24:51 +02:00
|
|
|
# We'll need this for when we receive a pubkey reply:
|
|
|
|
# it will be encrypted and we'll need to decrypt it.
|
|
|
|
state.neededPubkeys[tag] = (
|
|
|
|
toAddress,
|
|
|
|
highlevelcrypto.makeCryptor(
|
|
|
|
hexlify(privEncryptionKey))
|
|
|
|
)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2019-11-05 17:54:04 +01:00
|
|
|
# Initialize the state.ackdataForWhichImWatching data structure
|
2013-08-29 13:27:30 +02:00
|
|
|
queryreturn = sqlQuery(
|
2020-09-19 13:58:23 +02:00
|
|
|
'''SELECT ackdata FROM sent WHERE status = 'msgsent' AND folder = 'sent' ''')
|
2013-06-21 23:32:22 +02:00
|
|
|
for row in queryreturn:
|
|
|
|
ackdata, = row
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('Watching for ackdata %s', hexlify(ackdata))
|
2019-11-05 17:54:04 +01:00
|
|
|
state.ackdataForWhichImWatching[ackdata] = 0
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-09-30 11:19:44 +02:00
|
|
|
# Fix legacy (headerless) watched ackdata to include header
|
2019-11-05 17:54:04 +01:00
|
|
|
for oldack in state.ackdataForWhichImWatching:
|
2018-10-10 12:49:48 +02:00
|
|
|
if len(oldack) == 32:
|
2017-09-30 11:19:44 +02:00
|
|
|
# attach legacy header, always constant (msg/1/1)
|
|
|
|
newack = '\x00\x00\x00\x02\x01\x01' + oldack
|
2019-11-05 17:54:04 +01:00
|
|
|
state.ackdataForWhichImWatching[newack] = 0
|
2017-09-21 17:24:51 +02:00
|
|
|
sqlExecute(
|
2020-10-14 17:36:20 +02:00
|
|
|
'''UPDATE sent SET ackdata=? WHERE ackdata=? AND folder = 'sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
newack, oldack
|
|
|
|
)
|
2019-11-05 17:54:04 +01:00
|
|
|
del state.ackdataForWhichImWatching[oldack]
|
2017-09-30 11:19:44 +02:00
|
|
|
|
2019-12-02 13:10:52 +01:00
|
|
|
# For the case if user deleted knownnodes
|
|
|
|
# but is still having onionpeer objects in inventory
|
|
|
|
if not knownnodes.knownNodesActual:
|
|
|
|
for item in Inventory().by_type_and_tag(protocol.OBJECT_ONIONPEER):
|
|
|
|
queues.objectProcessorQueue.put((
|
|
|
|
protocol.OBJECT_ONIONPEER, item.payload
|
|
|
|
))
|
|
|
|
# FIXME: should also delete from inventory
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# give some time for the GUI to start
|
|
|
|
# before we start on existing POW tasks.
|
|
|
|
self.stop.wait(10)
|
2015-03-09 07:35:32 +01:00
|
|
|
|
2019-05-10 08:03:29 +02:00
|
|
|
if state.shutdown:
|
2019-04-26 15:59:56 +02:00
|
|
|
return
|
|
|
|
|
|
|
|
# just in case there are any pending tasks for msg
|
|
|
|
# messages that have yet to be sent.
|
|
|
|
queues.workerQueue.put(('sendmessage', ''))
|
|
|
|
# just in case there are any tasks for Broadcasts
|
|
|
|
# that have yet to be sent.
|
|
|
|
queues.workerQueue.put(('sendbroadcast', ''))
|
|
|
|
|
|
|
|
# send onionpeer object
|
2019-06-14 11:38:48 +02:00
|
|
|
queues.workerQueue.put(('sendOnionPeerObj', ''))
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-01-14 23:20:15 +01:00
|
|
|
while state.shutdown == 0:
|
2016-04-20 15:33:01 +02:00
|
|
|
self.busy = 0
|
2017-02-08 13:41:56 +01:00
|
|
|
command, data = queues.workerQueue.get()
|
2016-04-20 15:33:01 +02:00
|
|
|
self.busy = 1
|
2013-06-21 23:32:22 +02:00
|
|
|
if command == 'sendmessage':
|
2016-04-17 20:31:25 +02:00
|
|
|
try:
|
|
|
|
self.sendMsg()
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("sendMsg didn't work")
|
2013-06-21 23:32:22 +02:00
|
|
|
elif command == 'sendbroadcast':
|
2016-04-17 20:31:25 +02:00
|
|
|
try:
|
|
|
|
self.sendBroadcast()
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("sendBroadcast didn't work")
|
2013-06-21 23:32:22 +02:00
|
|
|
elif command == 'doPOWForMyV2Pubkey':
|
2016-04-17 20:31:25 +02:00
|
|
|
try:
|
|
|
|
self.doPOWForMyV2Pubkey(data)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("doPOWForMyV2Pubkey didn't work")
|
2013-07-22 07:10:22 +02:00
|
|
|
elif command == 'sendOutOrStoreMyV3Pubkey':
|
2016-04-17 20:31:25 +02:00
|
|
|
try:
|
|
|
|
self.sendOutOrStoreMyV3Pubkey(data)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("sendOutOrStoreMyV3Pubkey didn't work")
|
2013-09-13 06:27:34 +02:00
|
|
|
elif command == 'sendOutOrStoreMyV4Pubkey':
|
2016-04-17 20:31:25 +02:00
|
|
|
try:
|
|
|
|
self.sendOutOrStoreMyV4Pubkey(data)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("sendOutOrStoreMyV4Pubkey didn't work")
|
2019-04-26 15:59:56 +02:00
|
|
|
elif command == 'sendOnionPeerObj':
|
|
|
|
try:
|
|
|
|
self.sendOnionPeerObj(data)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("sendOnionPeerObj didn't work")
|
2017-02-28 22:59:44 +01:00
|
|
|
elif command == 'resetPoW':
|
|
|
|
try:
|
|
|
|
proofofwork.resetPoW()
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("proofofwork.resetPoW didn't work")
|
2015-11-24 01:55:17 +01:00
|
|
|
elif command == 'stopThread':
|
2016-04-20 15:33:01 +02:00
|
|
|
self.busy = 0
|
2015-11-24 01:55:17 +01:00
|
|
|
return
|
2013-06-21 23:32:22 +02:00
|
|
|
else:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Probable programming error: The command sent'
|
|
|
|
' to the workerThread is weird. It is: %s\n',
|
|
|
|
command
|
|
|
|
)
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.workerQueue.task_done()
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info("Quitting...")
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
def _getKeysForAddress(self, address):
|
2022-01-28 13:55:23 +01:00
|
|
|
privSigningKeyBase58 = config.get(
|
2017-09-21 17:29:32 +02:00
|
|
|
address, 'privsigningkey')
|
2022-01-28 13:55:23 +01:00
|
|
|
privEncryptionKeyBase58 = config.get(
|
2017-09-21 17:29:32 +02:00
|
|
|
address, 'privencryptionkey')
|
|
|
|
|
|
|
|
privSigningKeyHex = hexlify(shared.decodeWalletImportFormat(
|
|
|
|
privSigningKeyBase58))
|
|
|
|
privEncryptionKeyHex = hexlify(shared.decodeWalletImportFormat(
|
|
|
|
privEncryptionKeyBase58))
|
|
|
|
|
|
|
|
# The \x04 on the beginning of the public keys are not sent.
|
|
|
|
# This way there is only one acceptable way to encode
|
|
|
|
# and send a public key.
|
|
|
|
pubSigningKey = unhexlify(highlevelcrypto.privToPub(
|
|
|
|
privSigningKeyHex))[1:]
|
|
|
|
pubEncryptionKey = unhexlify(highlevelcrypto.privToPub(
|
|
|
|
privEncryptionKeyHex))[1:]
|
|
|
|
|
|
|
|
return privSigningKeyHex, privEncryptionKeyHex, \
|
|
|
|
pubSigningKey, pubEncryptionKey
|
|
|
|
|
|
|
|
def _doPOWDefaults(self, payload, TTL,
|
|
|
|
log_prefix='',
|
|
|
|
log_time=False):
|
|
|
|
target = 2 ** 64 / (
|
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte * (
|
2021-08-16 18:39:16 +02:00
|
|
|
len(payload) + 8
|
|
|
|
+ defaults.networkDefaultPayloadLengthExtraBytes + ((
|
2017-09-21 17:29:32 +02:00
|
|
|
TTL * (
|
2021-08-16 18:39:16 +02:00
|
|
|
len(payload) + 8
|
|
|
|
+ defaults.networkDefaultPayloadLengthExtraBytes
|
2017-09-21 17:29:32 +02:00
|
|
|
)) / (2 ** 16))
|
|
|
|
))
|
|
|
|
initialHash = hashlib.sha512(payload).digest()
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:29:32 +02:00
|
|
|
'%s Doing proof of work... TTL set to %s', log_prefix, TTL)
|
|
|
|
if log_time:
|
|
|
|
start_time = time.time()
|
|
|
|
trialValue, nonce = proofofwork.run(target, initialHash)
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:29:32 +02:00
|
|
|
'%s Found proof of work %s Nonce: %s',
|
|
|
|
log_prefix, trialValue, nonce
|
|
|
|
)
|
|
|
|
try:
|
|
|
|
delta = time.time() - start_time
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:29:32 +02:00
|
|
|
'PoW took %.1f seconds, speed %s.',
|
|
|
|
delta, sizeof_fmt(nonce / delta)
|
|
|
|
)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722 # NameError
|
|
|
|
self.logger.warning("Proof of Work exception")
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = pack('>Q', nonce) + payload
|
|
|
|
return payload
|
|
|
|
|
2018-05-02 17:29:55 +02:00
|
|
|
def doPOWForMyV2Pubkey(self, adressHash):
|
2020-01-17 14:27:36 +01:00
|
|
|
""" This function also broadcasts out the pubkey
|
|
|
|
message once it is done with the POW"""
|
2013-06-21 23:32:22 +02:00
|
|
|
# Look up my stream number based on my address hash
|
2018-05-02 17:29:55 +02:00
|
|
|
myAddress = shared.myAddressesByHash[adressHash]
|
2018-03-22 12:23:36 +01:00
|
|
|
# status
|
2020-01-17 14:27:36 +01:00
|
|
|
_, addressVersionNumber, streamNumber, adressHash = (
|
|
|
|
decodeAddress(myAddress))
|
2018-03-21 12:52:23 +01:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# 28 days from now plus or minus five minutes
|
|
|
|
TTL = int(28 * 24 * 60 * 60 + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2014-08-27 09:14:32 +02:00
|
|
|
payload = pack('>Q', (embeddedTime))
|
2017-09-21 17:24:51 +02:00
|
|
|
payload += '\x00\x00\x00\x01' # object type: pubkey
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(addressVersionNumber) # Address version number
|
|
|
|
payload += encodeVarint(streamNumber)
|
2017-09-21 17:24:51 +02:00
|
|
|
# bitfield of features supported by me (see the wiki).
|
|
|
|
payload += protocol.getBitfield(myAddress)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
try:
|
2017-09-21 17:29:32 +02:00
|
|
|
# privSigningKeyHex, privEncryptionKeyHex
|
|
|
|
_, _, pubSigningKey, pubEncryptionKey = \
|
|
|
|
self._getKeysForAddress(myAddress)
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
|
|
|
self.logger.warning("Section or Option did not found: %s", err)
|
2013-06-21 23:32:22 +02:00
|
|
|
except Exception as err:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error within doPOWForMyV2Pubkey. Could not read'
|
|
|
|
' the keys from the keys.dat file for a requested'
|
2018-03-22 11:10:40 +01:00
|
|
|
' address. %s\n', err
|
2017-09-21 17:24:51 +02:00
|
|
|
)
|
2013-06-21 23:32:22 +02:00
|
|
|
return
|
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
payload += pubSigningKey + pubEncryptionKey
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
# Do the POW for this pubkey message
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(
|
|
|
|
payload, TTL, log_prefix='(For pubkey message)')
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
inventoryHash = calculateInventoryHash(payload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 1
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2017-09-21 17:24:51 +02:00
|
|
|
objectType, streamNumber, payload, embeddedTime, '')
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
|
|
|
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((streamNumber, inventoryHash))
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put(('updateStatusBar', ''))
|
2013-11-07 05:38:19 +01:00
|
|
|
try:
|
2022-01-28 13:55:23 +01:00
|
|
|
config.set(
|
2013-11-07 05:38:19 +01:00
|
|
|
myAddress, 'lastpubkeysendtime', str(int(time.time())))
|
2022-01-28 13:55:23 +01:00
|
|
|
config.save()
|
2021-08-16 17:43:44 +02:00
|
|
|
except configparser.NoSectionError:
|
2017-09-21 17:24:51 +02:00
|
|
|
# The user deleted the address out of the keys.dat file
|
|
|
|
# before this finished.
|
2013-11-07 05:38:19 +01:00
|
|
|
pass
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
2022-01-28 13:55:23 +01:00
|
|
|
self.logger.warning("config.set didn't work")
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2018-05-02 17:29:55 +02:00
|
|
|
def sendOutOrStoreMyV3Pubkey(self, adressHash):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""
|
|
|
|
If this isn't a chan address, this function assembles the pubkey data, does the necessary POW and sends it out.
|
|
|
|
If it *is* a chan then it assembles the pubkey and stores is in the pubkey table so that we can send messages
|
|
|
|
to "ourselves".
|
|
|
|
"""
|
2013-11-07 05:38:19 +01:00
|
|
|
try:
|
2018-05-02 17:29:55 +02:00
|
|
|
myAddress = shared.myAddressesByHash[adressHash]
|
2021-08-16 17:43:44 +02:00
|
|
|
except KeyError:
|
2017-09-21 17:24:51 +02:00
|
|
|
# The address has been deleted.
|
2021-08-16 17:43:44 +02:00
|
|
|
self.logger.warning("Can't find %s in myAddressByHash", hexlify(adressHash))
|
2013-11-07 05:38:19 +01:00
|
|
|
return
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.safeGetBoolean(myAddress, 'chan'):
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('This is a chan address. Not sending pubkey.')
|
2013-09-30 01:24:27 +02:00
|
|
|
return
|
2018-05-02 17:29:55 +02:00
|
|
|
_, addressVersionNumber, streamNumber, adressHash = decodeAddress(
|
2013-06-21 23:32:22 +02:00
|
|
|
myAddress)
|
2018-03-21 12:52:23 +01:00
|
|
|
|
|
|
|
# 28 days from now plus or minus five minutes
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = int(28 * 24 * 60 * 60 + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2018-05-02 17:29:55 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# signedTimeForProtocolV2 = embeddedTime - TTL
|
2018-05-02 17:29:55 +02:00
|
|
|
# According to the protocol specification, the expiresTime
|
|
|
|
# along with the pubkey information is signed. But to be
|
|
|
|
# backwards compatible during the upgrade period, we shall sign
|
|
|
|
# not the expiresTime but rather the current time. There must be
|
|
|
|
# precisely a 28 day difference between the two. After the upgrade
|
|
|
|
# period we'll switch to signing the whole payload with the
|
|
|
|
# expiresTime time.
|
|
|
|
|
2014-08-27 09:14:32 +02:00
|
|
|
payload = pack('>Q', (embeddedTime))
|
2017-09-21 17:24:51 +02:00
|
|
|
payload += '\x00\x00\x00\x01' # object type: pubkey
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(addressVersionNumber) # Address version number
|
|
|
|
payload += encodeVarint(streamNumber)
|
2017-09-21 17:24:51 +02:00
|
|
|
# bitfield of features supported by me (see the wiki).
|
|
|
|
payload += protocol.getBitfield(myAddress)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
try:
|
2018-03-22 12:23:36 +01:00
|
|
|
# , privEncryptionKeyHex
|
|
|
|
privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \
|
|
|
|
self._getKeysForAddress(myAddress)
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
|
|
|
self.logger.warning("Section or Option did not found: %s", err)
|
2013-06-21 23:32:22 +02:00
|
|
|
except Exception as err:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error within sendOutOrStoreMyV3Pubkey. Could not read'
|
|
|
|
' the keys from the keys.dat file for a requested'
|
2018-03-22 11:10:40 +01:00
|
|
|
' address. %s\n', err
|
2017-09-21 17:24:51 +02:00
|
|
|
)
|
2013-06-21 23:32:22 +02:00
|
|
|
return
|
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
payload += pubSigningKey + pubEncryptionKey
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2022-01-28 13:55:23 +01:00
|
|
|
payload += encodeVarint(config.getint(
|
2013-06-21 23:32:22 +02:00
|
|
|
myAddress, 'noncetrialsperbyte'))
|
2022-01-28 13:55:23 +01:00
|
|
|
payload += encodeVarint(config.getint(
|
2013-06-21 23:32:22 +02:00
|
|
|
myAddress, 'payloadlengthextrabytes'))
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2014-12-25 09:57:34 +01:00
|
|
|
signature = highlevelcrypto.sign(payload, privSigningKeyHex)
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(len(signature))
|
|
|
|
payload += signature
|
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
# Do the POW for this pubkey message
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(
|
|
|
|
payload, TTL, log_prefix='(For pubkey message)')
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
inventoryHash = calculateInventoryHash(payload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 1
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2017-09-21 17:24:51 +02:00
|
|
|
objectType, streamNumber, payload, embeddedTime, '')
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
|
|
|
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((streamNumber, inventoryHash))
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put(('updateStatusBar', ''))
|
2013-11-07 05:38:19 +01:00
|
|
|
try:
|
2022-01-28 13:55:23 +01:00
|
|
|
config.set(
|
2013-11-07 05:38:19 +01:00
|
|
|
myAddress, 'lastpubkeysendtime', str(int(time.time())))
|
2022-01-28 13:55:23 +01:00
|
|
|
config.save()
|
2021-08-16 17:43:44 +02:00
|
|
|
except configparser.NoSectionError:
|
2017-09-21 17:29:32 +02:00
|
|
|
# The user deleted the address out of the keys.dat file
|
|
|
|
# before this finished.
|
2013-11-07 05:38:19 +01:00
|
|
|
pass
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("BMConfigParser().set didn't work")
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2013-09-13 06:27:34 +02:00
|
|
|
def sendOutOrStoreMyV4Pubkey(self, myAddress):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""
|
|
|
|
It doesn't send directly anymore. It put is to a queue for another thread to send at an appropriate time,
|
|
|
|
whereas in the past it directly appended it to the outgoing buffer, I think. Same with all the other methods in
|
|
|
|
this class.
|
|
|
|
"""
|
2022-01-28 13:55:23 +01:00
|
|
|
if not config.has_section(myAddress):
|
2017-09-21 17:24:51 +02:00
|
|
|
# The address has been deleted.
|
2013-11-07 05:38:19 +01:00
|
|
|
return
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.safeGetBoolean(myAddress, 'chan'):
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('This is a chan address. Not sending pubkey.')
|
2013-09-30 01:24:27 +02:00
|
|
|
return
|
2018-05-02 17:29:55 +02:00
|
|
|
_, addressVersionNumber, streamNumber, addressHash = decodeAddress(
|
2013-09-13 06:27:34 +02:00
|
|
|
myAddress)
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2018-03-21 12:52:23 +01:00
|
|
|
# 28 days from now plus or minus five minutes
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = int(28 * 24 * 60 * 60 + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2013-09-13 06:27:34 +02:00
|
|
|
payload = pack('>Q', (embeddedTime))
|
2017-09-21 17:24:51 +02:00
|
|
|
payload += '\x00\x00\x00\x01' # object type: pubkey
|
2013-09-13 06:27:34 +02:00
|
|
|
payload += encodeVarint(addressVersionNumber) # Address version number
|
|
|
|
payload += encodeVarint(streamNumber)
|
2017-01-11 14:27:19 +01:00
|
|
|
dataToEncrypt = protocol.getBitfield(myAddress)
|
2013-09-13 06:27:34 +02:00
|
|
|
|
|
|
|
try:
|
2018-03-22 12:48:07 +01:00
|
|
|
# , privEncryptionKeyHex
|
|
|
|
privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \
|
|
|
|
self._getKeysForAddress(myAddress)
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
|
|
|
self.logger.warning("Section or Option did not found: %s", err)
|
2013-09-13 06:27:34 +02:00
|
|
|
except Exception as err:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error within sendOutOrStoreMyV4Pubkey. Could not read'
|
|
|
|
' the keys from the keys.dat file for a requested'
|
2018-03-22 11:10:40 +01:00
|
|
|
' address. %s\n', err
|
2017-09-21 17:24:51 +02:00
|
|
|
)
|
2013-09-13 06:27:34 +02:00
|
|
|
return
|
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
dataToEncrypt += pubSigningKey + pubEncryptionKey
|
2013-09-13 06:27:34 +02:00
|
|
|
|
2022-01-28 13:55:23 +01:00
|
|
|
dataToEncrypt += encodeVarint(config.getint(
|
2013-09-13 06:27:34 +02:00
|
|
|
myAddress, 'noncetrialsperbyte'))
|
2022-01-28 13:55:23 +01:00
|
|
|
dataToEncrypt += encodeVarint(config.getint(
|
2013-09-13 06:27:34 +02:00
|
|
|
myAddress, 'payloadlengthextrabytes'))
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2014-08-27 09:14:32 +02:00
|
|
|
# When we encrypt, we'll use a hash of the data
|
2017-09-21 17:24:51 +02:00
|
|
|
# contained in an address as a decryption key. This way
|
|
|
|
# in order to read the public keys in a pubkey message,
|
|
|
|
# a node must know the address first. We'll also tag,
|
|
|
|
# unencrypted, the pubkey with part of the hash so that nodes
|
|
|
|
# know which pubkey object to try to decrypt
|
|
|
|
# when they want to send a message.
|
|
|
|
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(addressVersionNumber)
|
|
|
|
+ encodeVarint(streamNumber) + addressHash
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()
|
|
|
|
payload += doubleHashOfAddressData[32:] # the tag
|
|
|
|
signature = highlevelcrypto.sign(
|
|
|
|
payload + dataToEncrypt, privSigningKeyHex
|
|
|
|
)
|
2014-08-27 09:14:32 +02:00
|
|
|
dataToEncrypt += encodeVarint(len(signature))
|
|
|
|
dataToEncrypt += signature
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
privEncryptionKey = doubleHashOfAddressData[:32]
|
2014-05-21 12:15:07 +02:00
|
|
|
pubEncryptionKey = highlevelcrypto.pointMult(privEncryptionKey)
|
2013-09-30 01:24:27 +02:00
|
|
|
payload += highlevelcrypto.encrypt(
|
2016-03-23 23:26:57 +01:00
|
|
|
dataToEncrypt, hexlify(pubEncryptionKey))
|
2013-09-18 06:04:01 +02:00
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
# Do the POW for this pubkey message
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(
|
|
|
|
payload, TTL, log_prefix='(For pubkey message)')
|
2013-09-13 06:27:34 +02:00
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
inventoryHash = calculateInventoryHash(payload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 1
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2017-09-21 17:24:51 +02:00
|
|
|
objectType, streamNumber, payload, embeddedTime,
|
|
|
|
doubleHashOfAddressData[32:]
|
|
|
|
)
|
2013-09-13 06:27:34 +02:00
|
|
|
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
|
|
|
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
2013-09-18 06:04:01 +02:00
|
|
|
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((streamNumber, inventoryHash))
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put(('updateStatusBar', ''))
|
2013-10-26 01:35:59 +02:00
|
|
|
try:
|
2022-01-28 13:55:23 +01:00
|
|
|
config.set(
|
2013-10-26 01:35:59 +02:00
|
|
|
myAddress, 'lastpubkeysendtime', str(int(time.time())))
|
2022-01-28 13:55:23 +01:00
|
|
|
config.save()
|
2013-12-06 07:52:19 +01:00
|
|
|
except Exception as err:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error: Couldn\'t add the lastpubkeysendtime'
|
2018-03-22 11:10:40 +01:00
|
|
|
' to the keys.dat file. Error message: %s', err
|
2017-09-21 17:24:51 +02:00
|
|
|
)
|
2013-09-13 06:27:34 +02:00
|
|
|
|
2019-06-14 11:38:48 +02:00
|
|
|
    def sendOnionPeerObj(self, peer=None):
        """
        Send onionpeer object representing peer.

        :param peer: an object with ``host`` and ``port`` attributes;
            when ``None``, the node's own ``.onion`` address (if any)
            from ``state.ownAddresses`` is advertised instead.
        """
        if not peer:  # find own onionhostname
            for peer in state.ownAddresses:
                if peer.host.endswith('.onion'):
                    break
            else:
                # No own onion address known; nothing to advertise.
                return
        # Roughly one week lifetime, +/- five minutes of random jitter.
        TTL = int(7 * 24 * 60 * 60 + helper_random.randomrandrange(-300, 300))
        embeddedTime = int(time.time() + TTL)
        streamNumber = 1  # Don't know yet what should be here
        objectType = protocol.OBJECT_ONIONPEER
        # FIXME: ideally the objectPayload should be signed
        objectPayload = encodeVarint(peer.port) + protocol.encodeHost(peer.host)
        tag = calculateInventoryHash(objectPayload)

        # If an identical object is already in inventory, skip the PoW.
        if Inventory().by_type_and_tag(objectType, tag):
            return  # not expired

        payload = pack('>Q', embeddedTime)
        payload += pack('>I', objectType)
        # Object version field.
        # NOTE(review): presumably 22 chars distinguishes a short (v2)
        # .onion hostname from a longer (v3) one -- confirm.
        payload += encodeVarint(2 if len(peer.host) == 22 else 3)
        payload += encodeVarint(streamNumber)
        payload += objectPayload

        payload = self._doPOWDefaults(
            payload, TTL, log_prefix='(For onionpeer object)')

        inventoryHash = calculateInventoryHash(payload)
        # buffer() is Python 2 only; this module targets Python 2.
        Inventory()[inventoryHash] = (
            objectType, streamNumber, buffer(payload),
            embeddedTime, buffer(tag)
        )
        self.logger.info(
            'sending inv (within sendOnionPeerObj function) for object: %s',
            hexlify(inventoryHash))
        queues.invQueue.put((streamNumber, inventoryHash))
|
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
def sendBroadcast(self):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""Send a broadcast-type object (assemble the object, perform PoW and put it to the inv announcement queue)"""
|
2016-10-05 20:06:47 +02:00
|
|
|
# Reset just in case
|
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='broadcastqueued' '''
|
2018-05-02 17:29:55 +02:00
|
|
|
|
2020-09-19 13:58:23 +02:00
|
|
|
'''WHERE status = 'doingbroadcastpow' AND folder = 'sent' ''')
|
2013-08-29 13:27:30 +02:00
|
|
|
queryreturn = sqlQuery(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''SELECT fromaddress, subject, message, '''
|
|
|
|
''' ackdata, ttl, encodingtype FROM sent '''
|
|
|
|
''' WHERE status=? and folder='sent' ''', 'broadcastqueued')
|
2016-10-05 20:06:47 +02:00
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
for row in queryreturn:
|
2016-11-14 20:23:58 +01:00
|
|
|
fromaddress, subject, body, ackdata, TTL, encoding = row
|
2018-03-22 12:23:36 +01:00
|
|
|
# status
|
|
|
|
_, addressVersionNumber, streamNumber, ripe = \
|
2017-09-21 17:24:51 +02:00
|
|
|
decodeAddress(fromaddress)
|
2013-07-31 18:36:51 +02:00
|
|
|
if addressVersionNumber <= 1:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error: In the singleWorker thread, the '
|
|
|
|
' sendBroadcast function doesn\'t understand'
|
|
|
|
' the address version.\n')
|
2013-07-31 18:36:51 +02:00
|
|
|
return
|
|
|
|
# We need to convert our private keys to public keys in order
|
|
|
|
# to include them.
|
|
|
|
try:
|
2018-03-22 12:48:07 +01:00
|
|
|
# , privEncryptionKeyHex
|
|
|
|
privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \
|
|
|
|
self._getKeysForAddress(fromaddress)
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
|
|
|
self.logger.warning("Section or Option did not found: %s", err)
|
2021-08-23 13:56:28 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Error! Could not find sender address"
|
|
|
|
" (your address) in the keys.dat file."))
|
|
|
|
))
|
2021-08-16 17:43:44 +02:00
|
|
|
except Exception as err:
|
|
|
|
self.logger.error(
|
|
|
|
'Error within sendBroadcast. Could not read'
|
|
|
|
' the keys from the keys.dat file for a requested'
|
|
|
|
' address. %s\n', err
|
|
|
|
)
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
2021-08-23 13:56:28 +02:00
|
|
|
"Error, can't send."))
|
2017-09-21 17:24:51 +02:00
|
|
|
))
|
2013-07-31 18:36:51 +02:00
|
|
|
continue
|
|
|
|
|
2020-09-19 13:58:23 +02:00
|
|
|
if not sqlExecute(
|
2020-10-12 19:47:05 +02:00
|
|
|
'''UPDATE sent SET status='doingbroadcastpow' '''
|
|
|
|
''' WHERE ackdata=? AND status='broadcastqueued' '''
|
|
|
|
''' AND folder='sent' ''',
|
|
|
|
ackdata):
|
2020-09-19 13:58:23 +02:00
|
|
|
continue
|
2016-10-05 20:06:47 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# At this time these pubkeys are 65 bytes long
|
|
|
|
# because they include the encoding byte which we won't
|
|
|
|
# be sending in the broadcast message.
|
2017-09-21 17:29:32 +02:00
|
|
|
# pubSigningKey = \
|
|
|
|
# highlevelcrypto.privToPub(privSigningKeyHex).decode('hex')
|
2013-07-31 18:36:51 +02:00
|
|
|
|
2015-03-09 07:35:32 +01:00
|
|
|
if TTL > 28 * 24 * 60 * 60:
|
|
|
|
TTL = 28 * 24 * 60 * 60
|
2017-09-21 17:24:51 +02:00
|
|
|
if TTL < 60 * 60:
|
|
|
|
TTL = 60 * 60
|
2018-03-21 12:52:23 +01:00
|
|
|
# add some randomness to the TTL
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = int(TTL + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2014-08-27 09:14:32 +02:00
|
|
|
payload = pack('>Q', embeddedTime)
|
2017-09-21 17:24:51 +02:00
|
|
|
payload += '\x00\x00\x00\x03' # object type: broadcast
|
2014-12-25 09:57:34 +01:00
|
|
|
|
|
|
|
if addressVersionNumber <= 3:
|
|
|
|
payload += encodeVarint(4) # broadcast version
|
|
|
|
else:
|
|
|
|
payload += encodeVarint(5) # broadcast version
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2013-07-31 18:36:51 +02:00
|
|
|
payload += encodeVarint(streamNumber)
|
2013-09-15 03:06:26 +02:00
|
|
|
if addressVersionNumber >= 4:
|
2017-09-21 17:24:51 +02:00
|
|
|
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(addressVersionNumber)
|
|
|
|
+ encodeVarint(streamNumber) + ripe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()
|
2013-12-01 06:45:37 +01:00
|
|
|
tag = doubleHashOfAddressData[32:]
|
|
|
|
payload += tag
|
|
|
|
else:
|
|
|
|
tag = ''
|
2013-07-31 18:36:51 +02:00
|
|
|
|
2014-12-25 09:57:34 +01:00
|
|
|
dataToEncrypt = encodeVarint(addressVersionNumber)
|
2013-07-31 18:36:51 +02:00
|
|
|
dataToEncrypt += encodeVarint(streamNumber)
|
2017-09-21 17:24:51 +02:00
|
|
|
# behavior bitfield
|
|
|
|
dataToEncrypt += protocol.getBitfield(fromaddress)
|
2017-09-21 17:29:32 +02:00
|
|
|
dataToEncrypt += pubSigningKey + pubEncryptionKey
|
2013-07-31 18:36:51 +02:00
|
|
|
if addressVersionNumber >= 3:
|
2022-01-28 13:55:23 +01:00
|
|
|
dataToEncrypt += encodeVarint(config.getint(
|
2017-09-21 17:24:51 +02:00
|
|
|
fromaddress, 'noncetrialsperbyte'))
|
2022-01-28 13:55:23 +01:00
|
|
|
dataToEncrypt += encodeVarint(config.getint(
|
2017-09-21 17:24:51 +02:00
|
|
|
fromaddress, 'payloadlengthextrabytes'))
|
|
|
|
# message encoding type
|
|
|
|
dataToEncrypt += encodeVarint(encoding)
|
|
|
|
encodedMessage = helper_msgcoding.MsgEncode(
|
|
|
|
{"subject": subject, "body": body}, encoding)
|
2016-11-14 20:23:58 +01:00
|
|
|
dataToEncrypt += encodeVarint(encodedMessage.length)
|
|
|
|
dataToEncrypt += encodedMessage.data
|
2014-12-25 09:57:34 +01:00
|
|
|
dataToSign = payload + dataToEncrypt
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2013-07-31 18:36:51 +02:00
|
|
|
signature = highlevelcrypto.sign(
|
2014-08-27 09:14:32 +02:00
|
|
|
dataToSign, privSigningKeyHex)
|
2013-07-31 18:36:51 +02:00
|
|
|
dataToEncrypt += encodeVarint(len(signature))
|
|
|
|
dataToEncrypt += signature
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# Encrypt the broadcast with the information
|
|
|
|
# contained in the broadcaster's address.
|
|
|
|
# Anyone who knows the address can generate
|
|
|
|
# the private encryption key to decrypt the broadcast.
|
|
|
|
# This provides virtually no privacy; its purpose is to keep
|
|
|
|
# questionable and illegal content from flowing through the
|
|
|
|
# Internet connections and being stored on the disk of 3rd parties.
|
2013-09-15 03:06:26 +02:00
|
|
|
if addressVersionNumber <= 3:
|
2017-09-21 17:24:51 +02:00
|
|
|
privEncryptionKey = hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(addressVersionNumber)
|
|
|
|
+ encodeVarint(streamNumber) + ripe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()[:32]
|
2013-09-15 03:06:26 +02:00
|
|
|
else:
|
|
|
|
privEncryptionKey = doubleHashOfAddressData[:32]
|
2013-12-01 06:45:37 +01:00
|
|
|
|
2014-05-21 12:15:07 +02:00
|
|
|
pubEncryptionKey = highlevelcrypto.pointMult(privEncryptionKey)
|
2013-07-31 18:36:51 +02:00
|
|
|
payload += highlevelcrypto.encrypt(
|
2016-03-23 23:26:57 +01:00
|
|
|
dataToEncrypt, hexlify(pubEncryptionKey))
|
2013-07-31 18:36:51 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Doing work necessary to send broadcast..."))
|
|
|
|
))
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(
|
|
|
|
payload, TTL, log_prefix='(For broadcast message)')
|
2017-09-21 17:24:51 +02:00
|
|
|
|
|
|
|
# Sanity check. The payload size should never be larger
|
|
|
|
# than 256 KiB. There should be checks elsewhere in the code
|
|
|
|
# to not let the user try to send a message this large
|
|
|
|
# until we implement message continuation.
|
|
|
|
if len(payload) > 2 ** 18: # 256 KiB
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.critical(
|
2017-09-21 17:24:51 +02:00
|
|
|
'This broadcast object is too large to send.'
|
|
|
|
' This should never happen. Object size: %s',
|
|
|
|
len(payload)
|
|
|
|
)
|
2014-08-27 09:14:32 +02:00
|
|
|
continue
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2013-07-31 18:36:51 +02:00
|
|
|
inventoryHash = calculateInventoryHash(payload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 3
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType, streamNumber, payload, embeddedTime, tag)
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'sending inv (within sendBroadcast function)'
|
|
|
|
' for object: %s',
|
|
|
|
hexlify(inventoryHash)
|
|
|
|
)
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((streamNumber, inventoryHash))
|
2013-07-31 18:36:51 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Broadcast sent on %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
|
|
|
))
|
2013-07-31 18:36:51 +02:00
|
|
|
|
|
|
|
# Update the status of the message in the 'sent' table to have
|
|
|
|
# a 'broadcastsent' status
|
2013-08-29 13:27:30 +02:00
|
|
|
sqlExecute(
|
2020-10-14 17:36:20 +02:00
|
|
|
'''UPDATE sent SET msgid=?, status=?, lastactiontime=? '''
|
|
|
|
''' WHERE ackdata=? AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
inventoryHash, 'broadcastsent', int(time.time()), ackdata
|
|
|
|
)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
def sendMsg(self):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""Send a message-type object (assemble the object, perform PoW and put it to the inv announcement queue)"""
|
|
|
|
# pylint: disable=too-many-nested-blocks
|
2016-10-05 20:06:47 +02:00
|
|
|
# Reset just in case
|
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='msgqueued' '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' WHERE status IN ('doingpubkeypow', 'doingmsgpow') '''
|
|
|
|
''' AND folder='sent' ''')
|
2016-10-05 20:06:47 +02:00
|
|
|
queryreturn = sqlQuery(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''SELECT toaddress, fromaddress, subject, message, '''
|
|
|
|
''' ackdata, status, ttl, retrynumber, encodingtype FROM '''
|
|
|
|
''' sent WHERE (status='msgqueued' or status='forcepow') '''
|
|
|
|
''' and folder='sent' ''')
|
|
|
|
# while we have a msg that needs some work
|
|
|
|
for row in queryreturn:
|
|
|
|
toaddress, fromaddress, subject, message, \
|
|
|
|
ackdata, status, TTL, retryNumber, encoding = row
|
2018-03-22 12:48:07 +01:00
|
|
|
# toStatus
|
|
|
|
_, toAddressVersionNumber, toStreamNumber, toRipe = \
|
2017-09-21 17:24:51 +02:00
|
|
|
decodeAddress(toaddress)
|
2018-03-22 12:23:36 +01:00
|
|
|
# fromStatus, , ,fromRipe
|
|
|
|
_, fromAddressVersionNumber, fromStreamNumber, _ = \
|
|
|
|
decodeAddress(fromaddress)
|
2017-09-21 17:24:51 +02:00
|
|
|
|
|
|
|
# We may or may not already have the pubkey
|
|
|
|
# for this toAddress. Let's check.
|
2014-08-27 09:14:32 +02:00
|
|
|
if status == 'forcepow':
|
2017-09-21 17:24:51 +02:00
|
|
|
# if the status of this msg is 'forcepow'
|
|
|
|
# then clearly we have the pubkey already
|
|
|
|
# because the user could not have overridden the message
|
|
|
|
# about the POW being too difficult without knowing
|
|
|
|
# the required difficulty.
|
2014-08-27 09:14:32 +02:00
|
|
|
pass
|
2015-03-09 07:35:32 +01:00
|
|
|
elif status == 'doingmsgpow':
|
2017-09-21 17:24:51 +02:00
|
|
|
# We wouldn't have set the status to doingmsgpow
|
|
|
|
# if we didn't already have the pubkey so let's assume
|
|
|
|
# that we have it.
|
2015-03-09 07:35:32 +01:00
|
|
|
pass
|
2017-09-21 17:24:51 +02:00
|
|
|
# If we are sending a message to ourselves or a chan
|
|
|
|
# then we won't need an entry in the pubkeys table;
|
|
|
|
# we can calculate the needed pubkey using the private keys
|
|
|
|
# in our keys.dat file.
|
2022-01-28 13:55:23 +01:00
|
|
|
elif config.has_section(toaddress):
|
2020-09-19 13:58:23 +02:00
|
|
|
if not sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='doingmsgpow' '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' WHERE toaddress=? AND status='msgqueued' AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
toaddress
|
2020-09-19 13:58:23 +02:00
|
|
|
):
|
|
|
|
continue
|
2017-09-21 17:24:51 +02:00
|
|
|
status = 'doingmsgpow'
|
2015-03-09 07:35:32 +01:00
|
|
|
elif status == 'msgqueued':
|
2014-08-27 09:14:32 +02:00
|
|
|
# Let's see if we already have the pubkey in our pubkeys table
|
|
|
|
queryreturn = sqlQuery(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''SELECT address FROM pubkeys WHERE address=?''',
|
|
|
|
toaddress
|
|
|
|
)
|
|
|
|
# If we have the needed pubkey in the pubkey table already,
|
|
|
|
if queryreturn != []:
|
2014-08-27 09:14:32 +02:00
|
|
|
# set the status of this msg to doingmsgpow
|
2020-09-19 13:58:23 +02:00
|
|
|
if not sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='doingmsgpow' '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' WHERE toaddress=? AND status='msgqueued' AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
toaddress
|
2020-09-19 13:58:23 +02:00
|
|
|
):
|
|
|
|
continue
|
2014-08-27 09:14:32 +02:00
|
|
|
status = 'doingmsgpow'
|
2017-09-21 17:24:51 +02:00
|
|
|
# mark the pubkey as 'usedpersonally' so that
|
|
|
|
# we don't delete it later. If the pubkey version
|
|
|
|
# is >= 4 then usedpersonally will already be set
|
|
|
|
# to yes because we'll only ever have
|
2015-03-09 07:35:32 +01:00
|
|
|
# usedpersonally v4 pubkeys in the pubkeys table.
|
2014-08-27 09:14:32 +02:00
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE pubkeys SET usedpersonally='yes' '''
|
|
|
|
''' WHERE address=?''',
|
|
|
|
toaddress
|
|
|
|
)
|
|
|
|
# We don't have the needed pubkey in the pubkeys table already.
|
|
|
|
else:
|
2014-08-27 09:14:32 +02:00
|
|
|
if toAddressVersionNumber <= 3:
|
|
|
|
toTag = ''
|
|
|
|
else:
|
2017-09-21 17:24:51 +02:00
|
|
|
toTag = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(toAddressVersionNumber)
|
|
|
|
+ encodeVarint(toStreamNumber) + toRipe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()[32:]
|
|
|
|
if toaddress in state.neededPubkeys or \
|
|
|
|
toTag in state.neededPubkeys:
|
2014-08-27 09:14:32 +02:00
|
|
|
# We already sent a request for the pubkey
|
2013-09-15 03:06:26 +02:00
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='awaitingpubkey', '''
|
|
|
|
''' sleeptill=? WHERE toaddress=? '''
|
|
|
|
''' AND status='msgqueued' ''',
|
|
|
|
int(time.time()) + 2.5 * 24 * 60 * 60,
|
|
|
|
toaddress
|
|
|
|
)
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByToAddress', (
|
|
|
|
toaddress,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Encryption key was requested earlier."))
|
|
|
|
))
|
|
|
|
# on with the next msg on which we can do some work
|
|
|
|
continue
|
2014-08-27 09:14:32 +02:00
|
|
|
else:
|
|
|
|
# We have not yet sent a request for the pubkey
|
|
|
|
needToRequestPubkey = True
|
2017-09-21 17:24:51 +02:00
|
|
|
# If we are trying to send to address
|
|
|
|
# version >= 4 then the needed pubkey might be
|
|
|
|
# encrypted in the inventory.
|
|
|
|
# If we have it we'll need to decrypt it
|
|
|
|
# and put it in the pubkeys table.
|
|
|
|
|
|
|
|
# The decryptAndCheckPubkeyPayload function
|
|
|
|
# expects that the shared.neededPubkeys dictionary
|
|
|
|
# already contains the toAddress and cryptor
|
|
|
|
# object associated with the tag for this toAddress.
|
|
|
|
if toAddressVersionNumber >= 4:
|
|
|
|
doubleHashOfToAddressData = hashlib.sha512(
|
2018-10-10 12:49:48 +02:00
|
|
|
hashlib.sha512(
|
|
|
|
encodeVarint(toAddressVersionNumber) + encodeVarint(toStreamNumber) + toRipe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()
|
|
|
|
).digest()
|
|
|
|
# The first half of the sha512 hash.
|
|
|
|
privEncryptionKey = doubleHashOfToAddressData[:32]
|
|
|
|
# The second half of the sha512 hash.
|
|
|
|
tag = doubleHashOfToAddressData[32:]
|
|
|
|
state.neededPubkeys[tag] = (
|
|
|
|
toaddress,
|
|
|
|
highlevelcrypto.makeCryptor(
|
|
|
|
hexlify(privEncryptionKey))
|
|
|
|
)
|
2016-03-18 02:01:59 +01:00
|
|
|
|
2017-01-10 21:15:35 +01:00
|
|
|
for value in Inventory().by_type_and_tag(1, toTag):
|
2017-09-21 17:24:51 +02:00
|
|
|
# if valid, this function also puts it
|
|
|
|
# in the pubkeys table.
|
2019-01-30 10:14:42 +01:00
|
|
|
if protocol.decryptAndCheckPubkeyPayload(
|
2017-09-21 17:24:51 +02:00
|
|
|
value.payload, toaddress
|
|
|
|
) == 'successful':
|
2016-03-18 02:01:59 +01:00
|
|
|
needToRequestPubkey = False
|
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET '''
|
|
|
|
''' status='doingmsgpow', '''
|
|
|
|
''' retrynumber=0 WHERE '''
|
|
|
|
''' toaddress=? AND '''
|
|
|
|
''' (status='msgqueued' or '''
|
|
|
|
''' status='awaitingpubkey' or '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' status='doingpubkeypow') AND '''
|
|
|
|
''' folder='sent' ''',
|
2016-03-18 02:01:59 +01:00
|
|
|
toaddress)
|
2017-01-14 23:20:15 +01:00
|
|
|
del state.neededPubkeys[tag]
|
2016-03-18 02:01:59 +01:00
|
|
|
break
|
2017-09-21 17:24:51 +02:00
|
|
|
# else:
|
|
|
|
# There was something wrong with this
|
|
|
|
# pubkey object even though it had
|
|
|
|
# the correct tag- almost certainly
|
|
|
|
# because of malicious behavior or
|
|
|
|
# a badly programmed client. If there are
|
|
|
|
# any other pubkeys in our inventory
|
|
|
|
# with the correct tag then we'll try
|
|
|
|
# to decrypt those.
|
2014-08-27 09:14:32 +02:00
|
|
|
if needToRequestPubkey:
|
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET '''
|
|
|
|
''' status='doingpubkeypow' WHERE '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' toaddress=? AND status='msgqueued' AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
toaddress
|
|
|
|
)
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByToAddress', (
|
|
|
|
toaddress,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Sending a request for the"
|
|
|
|
" recipient\'s encryption key."))
|
|
|
|
))
|
2014-08-27 09:14:32 +02:00
|
|
|
self.requestPubKey(toaddress)
|
2017-09-21 17:24:51 +02:00
|
|
|
# on with the next msg on which we can do some work
|
|
|
|
continue
|
2018-03-21 12:52:23 +01:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# At this point we know that we have the necessary pubkey
|
|
|
|
# in the pubkeys table.
|
2018-03-21 12:52:23 +01:00
|
|
|
|
2017-02-25 23:40:37 +01:00
|
|
|
TTL *= 2**retryNumber
|
|
|
|
if TTL > 28 * 24 * 60 * 60:
|
|
|
|
TTL = 28 * 24 * 60 * 60
|
2018-03-21 12:52:23 +01:00
|
|
|
# add some randomness to the TTL
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = int(TTL + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2018-03-21 12:52:23 +01:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# if we aren't sending this to ourselves or a chan
|
2022-01-28 13:55:23 +01:00
|
|
|
if not config.has_section(toaddress):
|
2019-11-05 17:54:04 +01:00
|
|
|
state.ackdataForWhichImWatching[ackdata] = 0
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Looking up the receiver\'s public key"))
|
|
|
|
))
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('Sending a message.')
|
|
|
|
self.logger.debug(
|
2017-09-21 17:24:51 +02:00
|
|
|
'First 150 characters of message: %s',
|
|
|
|
repr(message[:150])
|
|
|
|
)
|
|
|
|
|
|
|
|
# Let us fetch the recipient's public key out of
|
|
|
|
# our database. If the required proof of work difficulty
|
|
|
|
# is too hard then we'll abort.
|
2013-09-30 01:24:27 +02:00
|
|
|
queryreturn = sqlQuery(
|
2015-03-09 07:35:32 +01:00
|
|
|
'SELECT transmitdata FROM pubkeys WHERE address=?',
|
|
|
|
toaddress)
|
2018-10-10 12:49:48 +02:00
|
|
|
for row in queryreturn: # pylint: disable=redefined-outer-name
|
2013-09-30 01:24:27 +02:00
|
|
|
pubkeyPayload, = row
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# The pubkey message is stored with the following items
|
|
|
|
# all appended:
|
2014-12-25 09:57:34 +01:00
|
|
|
# -address version
|
|
|
|
# -stream number
|
|
|
|
# -behavior bitfield
|
|
|
|
# -pub signing key
|
|
|
|
# -pub encryption key
|
2017-09-21 17:24:51 +02:00
|
|
|
# -nonce trials per byte (if address version is >= 3)
|
2014-12-25 09:57:34 +01:00
|
|
|
# -length extra bytes (if address version is >= 3)
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# to bypass the address version whose length is definitely 1
|
|
|
|
readPosition = 1
|
2018-05-02 17:29:55 +02:00
|
|
|
_, streamNumberLength = decodeVarint(
|
2013-06-21 23:32:22 +02:00
|
|
|
pubkeyPayload[readPosition:readPosition + 10])
|
2013-09-30 01:24:27 +02:00
|
|
|
readPosition += streamNumberLength
|
|
|
|
behaviorBitfield = pubkeyPayload[readPosition:readPosition + 4]
|
2017-09-21 17:24:51 +02:00
|
|
|
# Mobile users may ask us to include their address's
|
|
|
|
# RIPE hash on a message unencrypted. Before we actually
|
|
|
|
# do it the sending human must check a box
|
2013-09-30 01:24:27 +02:00
|
|
|
# in the settings menu to allow it.
|
2017-09-21 17:24:51 +02:00
|
|
|
|
|
|
|
# if receiver is a mobile device who expects that their
|
|
|
|
# address RIPE is included unencrypted on the front of
|
|
|
|
# the message..
|
2019-01-30 10:14:42 +01:00
|
|
|
if protocol.isBitSetWithinBitfield(behaviorBitfield, 30):
|
2017-09-21 17:24:51 +02:00
|
|
|
# if we are Not willing to include the receiver's
|
|
|
|
# RIPE hash on the message..
|
2022-01-28 13:55:23 +01:00
|
|
|
if not config.safeGetBoolean(
|
2017-09-21 17:24:51 +02:00
|
|
|
'bitmessagesettings', 'willinglysendtomobile'
|
|
|
|
):
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'The receiver is a mobile user but the'
|
|
|
|
' sender (you) has not selected that you'
|
|
|
|
' are willing to send to mobiles. Aborting'
|
|
|
|
' send.'
|
|
|
|
)
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Problem: Destination is a mobile"
|
|
|
|
" device who requests that the"
|
|
|
|
" destination be included in the"
|
|
|
|
" message but this is disallowed in"
|
|
|
|
" your settings. %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
|
|
|
))
|
|
|
|
# if the human changes their setting and then
|
|
|
|
# sends another message or restarts their client,
|
|
|
|
# this one will send at that time.
|
2013-09-30 01:24:27 +02:00
|
|
|
continue
|
|
|
|
readPosition += 4 # to bypass the bitfield of behaviors
|
2017-09-21 17:24:51 +02:00
|
|
|
# We don't use this key for anything here.
|
|
|
|
# pubSigningKeyBase256 =
|
|
|
|
# pubkeyPayload[readPosition:readPosition+64]
|
2013-09-30 01:24:27 +02:00
|
|
|
readPosition += 64
|
|
|
|
pubEncryptionKeyBase256 = pubkeyPayload[
|
|
|
|
readPosition:readPosition + 64]
|
|
|
|
readPosition += 64
|
|
|
|
|
|
|
|
# Let us fetch the amount of work required by the recipient.
|
|
|
|
if toAddressVersionNumber == 2:
|
2017-09-21 17:24:51 +02:00
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte = \
|
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte
|
|
|
|
requiredPayloadLengthExtraBytes = \
|
|
|
|
defaults.networkDefaultPayloadLengthExtraBytes
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Doing work necessary to send message.\n"
|
|
|
|
"There is no required difficulty for"
|
|
|
|
" version 2 addresses like this."))
|
|
|
|
))
|
2013-09-30 01:24:27 +02:00
|
|
|
elif toAddressVersionNumber >= 3:
|
2017-09-21 17:24:51 +02:00
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte, \
|
|
|
|
varintLength = decodeVarint(
|
|
|
|
pubkeyPayload[readPosition:readPosition + 10])
|
2013-09-30 01:24:27 +02:00
|
|
|
readPosition += varintLength
|
2017-09-21 17:24:51 +02:00
|
|
|
requiredPayloadLengthExtraBytes, varintLength = \
|
|
|
|
decodeVarint(
|
|
|
|
pubkeyPayload[readPosition:readPosition + 10])
|
2013-09-30 01:24:27 +02:00
|
|
|
readPosition += varintLength
|
2017-09-21 17:24:51 +02:00
|
|
|
# We still have to meet a minimum POW difficulty
|
|
|
|
# regardless of what they say is allowed in order
|
|
|
|
# to get our message to propagate through the network.
|
|
|
|
if requiredAverageProofOfWorkNonceTrialsPerByte < \
|
2018-05-02 17:29:55 +02:00
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte:
|
2017-09-21 17:24:51 +02:00
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte = \
|
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte
|
|
|
|
if requiredPayloadLengthExtraBytes < \
|
|
|
|
defaults.networkDefaultPayloadLengthExtraBytes:
|
|
|
|
requiredPayloadLengthExtraBytes = \
|
|
|
|
defaults.networkDefaultPayloadLengthExtraBytes
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.debug(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Using averageProofOfWorkNonceTrialsPerByte: %s'
|
2018-03-22 11:10:40 +01:00
|
|
|
' and payloadLengthExtraBytes: %s.',
|
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte,
|
|
|
|
requiredPayloadLengthExtraBytes
|
|
|
|
)
|
2018-10-10 12:49:48 +02:00
|
|
|
|
|
|
|
queues.UISignalQueue.put(
|
|
|
|
(
|
|
|
|
'updateSentItemStatusByAckdata',
|
|
|
|
(
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Doing work necessary to send message.\n"
|
|
|
|
"Receiver\'s required difficulty: %1"
|
|
|
|
" and %2"
|
|
|
|
).arg(
|
|
|
|
str(
|
2021-08-16 18:39:16 +02:00
|
|
|
float(requiredAverageProofOfWorkNonceTrialsPerByte)
|
|
|
|
/ defaults.networkDefaultProofOfWorkNonceTrialsPerByte
|
2018-10-10 12:49:48 +02:00
|
|
|
)
|
|
|
|
).arg(
|
|
|
|
str(
|
2021-08-16 18:39:16 +02:00
|
|
|
float(requiredPayloadLengthExtraBytes)
|
|
|
|
/ defaults.networkDefaultPayloadLengthExtraBytes
|
2018-10-10 12:49:48 +02:00
|
|
|
)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
if status != 'forcepow':
|
2022-01-28 13:55:23 +01:00
|
|
|
maxacceptablenoncetrialsperbyte = config.getint(
|
2018-10-10 12:49:48 +02:00
|
|
|
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')
|
2022-01-28 13:55:23 +01:00
|
|
|
maxacceptablepayloadlengthextrabytes = config.getint(
|
2018-10-10 12:49:48 +02:00
|
|
|
'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')
|
|
|
|
cond1 = maxacceptablenoncetrialsperbyte and \
|
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte > maxacceptablenoncetrialsperbyte
|
|
|
|
cond2 = maxacceptablepayloadlengthextrabytes and \
|
|
|
|
requiredPayloadLengthExtraBytes > maxacceptablepayloadlengthextrabytes
|
|
|
|
|
|
|
|
if cond1 or cond2:
|
2017-09-21 17:24:51 +02:00
|
|
|
# The demanded difficulty is more than
|
|
|
|
# we are willing to do.
|
2013-09-30 01:24:27 +02:00
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET status='toodifficult' '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' WHERE ackdata=? AND folder='sent' ''',
|
2013-09-30 01:24:27 +02:00
|
|
|
ackdata)
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Problem: The work demanded by"
|
|
|
|
" the recipient (%1 and %2) is"
|
|
|
|
" more difficult than you are"
|
|
|
|
" willing to do. %3"
|
2021-08-16 18:39:16 +02:00
|
|
|
).arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte)
|
|
|
|
/ defaults.networkDefaultProofOfWorkNonceTrialsPerByte)
|
|
|
|
).arg(str(float(requiredPayloadLengthExtraBytes)
|
|
|
|
/ defaults.networkDefaultPayloadLengthExtraBytes)
|
|
|
|
).arg(l10n.formatTimestamp()))))
|
2013-09-30 01:24:27 +02:00
|
|
|
continue
|
2017-09-21 17:24:51 +02:00
|
|
|
else: # if we are sending a message to ourselves or a chan..
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('Sending a message.')
|
|
|
|
self.logger.debug(
|
|
|
|
'First 150 characters of message: %r', message[:150])
|
2017-01-11 14:27:19 +01:00
|
|
|
behaviorBitfield = protocol.getBitfield(fromaddress)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2013-09-30 01:24:27 +02:00
|
|
|
try:
|
2022-01-28 13:55:23 +01:00
|
|
|
privEncryptionKeyBase58 = config.get(
|
2013-09-30 01:24:27 +02:00
|
|
|
toaddress, 'privencryptionkey')
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Problem: You are trying to send a"
|
|
|
|
" message to yourself or a chan but your"
|
|
|
|
" encryption key could not be found in"
|
|
|
|
" the keys.dat file. Could not encrypt"
|
|
|
|
" message. %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
2018-03-22 11:10:40 +01:00
|
|
|
))
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Error within sendMsg. Could not read the keys'
|
2018-03-22 11:10:40 +01:00
|
|
|
' from the keys.dat file for our own address. %s\n',
|
|
|
|
err)
|
2013-09-30 01:24:27 +02:00
|
|
|
continue
|
2016-03-23 23:26:57 +01:00
|
|
|
privEncryptionKeyHex = hexlify(shared.decodeWalletImportFormat(
|
|
|
|
privEncryptionKeyBase58))
|
|
|
|
pubEncryptionKeyBase256 = unhexlify(highlevelcrypto.privToPub(
|
|
|
|
privEncryptionKeyHex))[1:]
|
2017-09-21 17:24:51 +02:00
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte = \
|
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte
|
|
|
|
requiredPayloadLengthExtraBytes = \
|
|
|
|
defaults.networkDefaultPayloadLengthExtraBytes
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Doing work necessary to send message."))
|
|
|
|
))
|
2013-08-26 21:23:12 +02:00
|
|
|
|
2014-12-25 09:57:34 +01:00
|
|
|
# Now we can start to assemble our message.
|
|
|
|
payload = encodeVarint(fromAddressVersionNumber)
|
|
|
|
payload += encodeVarint(fromStreamNumber)
|
2017-09-21 17:24:51 +02:00
|
|
|
# Bitfield of features and behaviors
|
|
|
|
# that can be expected from me. (See
|
|
|
|
# https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features)
|
|
|
|
payload += protocol.getBitfield(fromaddress)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2014-12-25 09:57:34 +01:00
|
|
|
# We need to convert our private keys to public keys in order
|
|
|
|
# to include them.
|
|
|
|
try:
|
2017-09-21 17:29:32 +02:00
|
|
|
privSigningKeyHex, privEncryptionKeyHex, \
|
|
|
|
pubSigningKey, pubEncryptionKey = self._getKeysForAddress(
|
|
|
|
fromaddress)
|
2021-08-16 17:43:44 +02:00
|
|
|
except (configparser.NoSectionError, configparser.NoOptionError) as err:
|
|
|
|
self.logger.warning("Section or Option did not found: %s", err)
|
2021-08-23 13:56:28 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Error! Could not find sender address"
|
|
|
|
" (your address) in the keys.dat file."))
|
|
|
|
))
|
2021-08-16 17:43:44 +02:00
|
|
|
except Exception as err:
|
|
|
|
self.logger.error(
|
|
|
|
'Error within sendMsg. Could not read'
|
|
|
|
' the keys from the keys.dat file for a requested'
|
|
|
|
' address. %s\n', err
|
|
|
|
)
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
2021-08-23 13:56:28 +02:00
|
|
|
"Error, can't send."))
|
2017-09-21 17:24:51 +02:00
|
|
|
))
|
2014-12-25 09:57:34 +01:00
|
|
|
continue
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
payload += pubSigningKey + pubEncryptionKey
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2014-12-25 09:57:34 +01:00
|
|
|
if fromAddressVersionNumber >= 3:
|
2013-06-21 23:32:22 +02:00
|
|
|
# If the receiver of our message is in our address book,
|
|
|
|
# subscriptions list, or whitelist then we will allow them to
|
|
|
|
# do the network-minimum proof of work. Let us check to see if
|
|
|
|
# the receiver is in any of those lists.
|
2017-09-21 17:24:51 +02:00
|
|
|
if shared.isAddressInMyAddressBookSubscriptionsListOrWhitelist(
|
|
|
|
toaddress):
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(
|
2017-02-08 20:37:42 +01:00
|
|
|
defaults.networkDefaultProofOfWorkNonceTrialsPerByte)
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(
|
2017-02-08 20:37:42 +01:00
|
|
|
defaults.networkDefaultPayloadLengthExtraBytes)
|
2013-06-21 23:32:22 +02:00
|
|
|
else:
|
2022-01-28 13:55:23 +01:00
|
|
|
payload += encodeVarint(config.getint(
|
2013-06-21 23:32:22 +02:00
|
|
|
fromaddress, 'noncetrialsperbyte'))
|
2022-01-28 13:55:23 +01:00
|
|
|
payload += encodeVarint(config.getint(
|
2013-06-21 23:32:22 +02:00
|
|
|
fromaddress, 'payloadlengthextrabytes'))
|
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# This hash will be checked by the receiver of the message
|
|
|
|
# to verify that toRipe belongs to them. This prevents
|
|
|
|
# a Surreptitious Forwarding Attack.
|
|
|
|
payload += toRipe
|
|
|
|
payload += encodeVarint(encoding) # message encoding type
|
|
|
|
encodedMessage = helper_msgcoding.MsgEncode(
|
|
|
|
{"subject": subject, "body": message}, encoding
|
|
|
|
)
|
2016-11-14 20:23:58 +01:00
|
|
|
payload += encodeVarint(encodedMessage.length)
|
|
|
|
payload += encodedMessage.data
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.has_section(toaddress):
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Not bothering to include ackdata because we are'
|
|
|
|
' sending to ourselves or a chan.'
|
|
|
|
)
|
|
|
|
fullAckPayload = ''
|
|
|
|
elif not protocol.checkBitfield(
|
|
|
|
behaviorBitfield, protocol.BITFIELD_DOESACK):
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Not bothering to include ackdata because'
|
|
|
|
' the receiver said that they won\'t relay it anyway.'
|
|
|
|
)
|
2014-12-25 09:57:34 +01:00
|
|
|
fullAckPayload = ''
|
|
|
|
else:
|
2017-09-21 17:24:51 +02:00
|
|
|
# The fullAckPayload is a normal msg protocol message
|
|
|
|
# with the proof of work already completed that the
|
|
|
|
# receiver of this message can easily send out.
|
2014-12-25 09:57:34 +01:00
|
|
|
fullAckPayload = self.generateFullAckMessage(
|
2017-09-21 17:24:51 +02:00
|
|
|
ackdata, toStreamNumber, TTL)
|
2014-12-25 09:57:34 +01:00
|
|
|
payload += encodeVarint(len(fullAckPayload))
|
|
|
|
payload += fullAckPayload
|
2017-09-21 17:24:51 +02:00
|
|
|
dataToSign = pack('>Q', embeddedTime) + '\x00\x00\x00\x02' + \
|
|
|
|
encodeVarint(1) + encodeVarint(toStreamNumber) + payload
|
2014-12-25 09:57:34 +01:00
|
|
|
signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
|
|
|
|
payload += encodeVarint(len(signature))
|
|
|
|
payload += signature
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
# We have assembled the data that will be encrypted.
|
|
|
|
try:
|
2017-09-21 17:24:51 +02:00
|
|
|
encrypted = highlevelcrypto.encrypt(
|
|
|
|
payload, "04" + hexlify(pubEncryptionKeyBase256)
|
|
|
|
)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("highlevelcrypto.encrypt didn't work")
|
2017-09-21 17:24:51 +02:00
|
|
|
sqlExecute(
|
2020-09-19 13:58:23 +02:00
|
|
|
'''UPDATE sent SET status='badkey' WHERE ackdata=? AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
ackdata
|
|
|
|
)
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Problem: The recipient\'s encryption key is"
|
|
|
|
" no good. Could not encrypt message. %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
|
|
|
))
|
2013-06-21 23:32:22 +02:00
|
|
|
continue
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2014-08-27 09:14:32 +02:00
|
|
|
encryptedPayload = pack('>Q', embeddedTime)
|
2017-09-21 17:24:51 +02:00
|
|
|
encryptedPayload += '\x00\x00\x00\x02' # object type: msg
|
|
|
|
encryptedPayload += encodeVarint(1) # msg version
|
2014-08-27 09:14:32 +02:00
|
|
|
encryptedPayload += encodeVarint(toStreamNumber) + encrypted
|
2017-09-21 17:24:51 +02:00
|
|
|
target = 2 ** 64 / (
|
|
|
|
requiredAverageProofOfWorkNonceTrialsPerByte * (
|
2021-08-16 18:39:16 +02:00
|
|
|
len(encryptedPayload) + 8
|
|
|
|
+ requiredPayloadLengthExtraBytes + ((
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL * (
|
2021-08-16 18:39:16 +02:00
|
|
|
len(encryptedPayload) + 8
|
|
|
|
+ requiredPayloadLengthExtraBytes
|
2017-09-21 17:24:51 +02:00
|
|
|
)) / (2 ** 16))
|
|
|
|
))
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'(For msg message) Doing proof of work. Total required'
|
|
|
|
' difficulty: %f. Required small message difficulty: %f.',
|
2021-08-16 18:39:16 +02:00
|
|
|
float(requiredAverageProofOfWorkNonceTrialsPerByte)
|
|
|
|
/ defaults.networkDefaultProofOfWorkNonceTrialsPerByte,
|
|
|
|
float(requiredPayloadLengthExtraBytes)
|
|
|
|
/ defaults.networkDefaultPayloadLengthExtraBytes
|
2017-09-21 17:24:51 +02:00
|
|
|
)
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
powStartTime = time.time()
|
|
|
|
initialHash = hashlib.sha512(encryptedPayload).digest()
|
|
|
|
trialValue, nonce = proofofwork.run(target, initialHash)
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'(For msg message) Found proof of work %s Nonce: %s',
|
|
|
|
trialValue, nonce
|
|
|
|
)
|
2015-11-18 16:22:17 +01:00
|
|
|
try:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'PoW took %.1f seconds, speed %s.',
|
|
|
|
time.time() - powStartTime,
|
|
|
|
sizeof_fmt(nonce / (time.time() - powStartTime))
|
|
|
|
)
|
2021-08-16 17:43:44 +02:00
|
|
|
except: # noqa:E722
|
|
|
|
self.logger.warning("Proof of Work exception")
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
encryptedPayload = pack('>Q', nonce) + encryptedPayload
|
2017-09-21 17:24:51 +02:00
|
|
|
|
|
|
|
# Sanity check. The encryptedPayload size should never be
|
|
|
|
# larger than 256 KiB. There should be checks elsewhere
|
|
|
|
# in the code to not let the user try to send a message
|
|
|
|
# this large until we implement message continuation.
|
|
|
|
if len(encryptedPayload) > 2 ** 18: # 256 KiB
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.critical(
|
2017-09-21 17:24:51 +02:00
|
|
|
'This msg object is too large to send. This should'
|
|
|
|
' never happen. Object size: %i',
|
|
|
|
len(encryptedPayload)
|
|
|
|
)
|
2014-08-27 09:14:32 +02:00
|
|
|
continue
|
2013-06-21 23:32:22 +02:00
|
|
|
|
|
|
|
inventoryHash = calculateInventoryHash(encryptedPayload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 2
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType, toStreamNumber, encryptedPayload, embeddedTime, '')
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.has_section(toaddress) or \
|
2018-10-10 12:49:48 +02:00
|
|
|
not protocol.checkBitfield(behaviorBitfield, protocol.BITFIELD_DOESACK):
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Message sent. Sent at %1"
|
2018-10-10 12:49:48 +02:00
|
|
|
).arg(l10n.formatTimestamp()))))
|
2013-07-22 07:10:22 +02:00
|
|
|
else:
|
2013-09-30 01:24:27 +02:00
|
|
|
# not sending to a chan or one of my addresses
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByAckdata', (
|
|
|
|
ackdata,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Message sent. Waiting for acknowledgement."
|
|
|
|
" Sent on %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
|
|
|
))
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Broadcasting inv for my msg(within sendmsg function): %s',
|
|
|
|
hexlify(inventoryHash)
|
|
|
|
)
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((toStreamNumber, inventoryHash))
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# Update the sent message in the sent table with the
|
|
|
|
# necessary information.
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.has_section(toaddress) or \
|
2018-10-10 12:49:48 +02:00
|
|
|
not protocol.checkBitfield(behaviorBitfield, protocol.BITFIELD_DOESACK):
|
2013-07-22 07:10:22 +02:00
|
|
|
newStatus = 'msgsentnoackexpected'
|
|
|
|
else:
|
|
|
|
newStatus = 'msgsent'
|
2017-02-25 23:40:37 +01:00
|
|
|
# wait 10% past expiration
|
|
|
|
sleepTill = int(time.time() + TTL * 1.1)
|
2020-10-12 19:47:05 +02:00
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET msgid=?, status=?, retrynumber=?, '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' sleeptill=?, lastactiontime=? WHERE ackdata=? AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
inventoryHash, newStatus, retryNumber + 1,
|
|
|
|
sleepTill, int(time.time()), ackdata
|
|
|
|
)
|
2020-10-14 17:36:20 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# If we are sending to ourselves or a chan, let's put
|
|
|
|
# the message in our own inbox.
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.has_section(toaddress):
|
2017-09-21 17:24:51 +02:00
|
|
|
# Used to detect and ignore duplicate messages in our inbox
|
|
|
|
sigHash = hashlib.sha512(hashlib.sha512(
|
|
|
|
signature).digest()).digest()[32:]
|
2013-09-30 05:01:56 +02:00
|
|
|
t = (inventoryHash, toaddress, fromaddress, subject, int(
|
2016-11-15 17:07:53 +01:00
|
|
|
time.time()), message, 'inbox', encoding, 0, sigHash)
|
2013-09-30 05:01:56 +02:00
|
|
|
helper_inbox.insert(t)
|
|
|
|
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put(('displayNewInboxMessage', (
|
2013-09-30 05:01:56 +02:00
|
|
|
inventoryHash, toaddress, fromaddress, subject, message)))
|
|
|
|
|
|
|
|
# If we are behaving as an API then we might need to run an
|
|
|
|
# outside command to let some program know that a new message
|
|
|
|
# has arrived.
|
2022-01-28 13:55:23 +01:00
|
|
|
if config.safeGetBoolean(
|
2017-09-21 17:24:51 +02:00
|
|
|
'bitmessagesettings', 'apienabled'):
|
2021-08-16 17:43:44 +02:00
|
|
|
|
2022-01-28 13:55:23 +01:00
|
|
|
apiNotifyPath = config.safeGet(
|
2021-08-16 17:43:44 +02:00
|
|
|
'bitmessagesettings', 'apinotifypath')
|
|
|
|
|
|
|
|
if apiNotifyPath:
|
2024-02-23 19:20:04 +01:00
|
|
|
# There is no additional risk of remote exploitation or
|
|
|
|
# privilege escalation
|
|
|
|
call([apiNotifyPath, "newMessage"]) # nosec:B603
|
2013-09-30 05:01:56 +02:00
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
def requestPubKey(self, toAddress):
|
2018-10-10 12:49:48 +02:00
|
|
|
"""Send a getpubkey object"""
|
2013-06-21 23:32:22 +02:00
|
|
|
toStatus, addressVersionNumber, streamNumber, ripe = decodeAddress(
|
|
|
|
toAddress)
|
|
|
|
if toStatus != 'success':
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.error(
|
2017-09-21 17:24:51 +02:00
|
|
|
'Very abnormal error occurred in requestPubKey.'
|
|
|
|
' toAddress is: %r. Please report this error to Atheros.',
|
|
|
|
toAddress
|
|
|
|
)
|
2013-06-21 23:32:22 +02:00
|
|
|
return
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2015-03-09 07:35:32 +01:00
|
|
|
queryReturn = sqlQuery(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''SELECT retrynumber FROM sent WHERE toaddress=? '''
|
|
|
|
''' AND (status='doingpubkeypow' OR status='awaitingpubkey') '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' AND folder='sent' LIMIT 1''',
|
2017-09-21 17:24:51 +02:00
|
|
|
toAddress
|
|
|
|
)
|
2018-10-10 12:49:48 +02:00
|
|
|
if not queryReturn:
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.critical(
|
2017-09-21 17:24:51 +02:00
|
|
|
'BUG: Why are we requesting the pubkey for %s'
|
|
|
|
' if there are no messages in the sent folder'
|
|
|
|
' to that address?', toAddress
|
|
|
|
)
|
2015-03-09 07:35:32 +01:00
|
|
|
return
|
|
|
|
retryNumber = queryReturn[0][0]
|
|
|
|
|
2013-09-13 06:27:34 +02:00
|
|
|
if addressVersionNumber <= 3:
|
2017-01-14 23:20:15 +01:00
|
|
|
state.neededPubkeys[toAddress] = 0
|
2013-09-13 06:27:34 +02:00
|
|
|
elif addressVersionNumber >= 4:
|
2017-09-21 17:24:51 +02:00
|
|
|
# If the user just clicked 'send' then the tag
|
|
|
|
# (and other information) will already be in the
|
|
|
|
# neededPubkeys dictionary. But if we are recovering
|
|
|
|
# from a restart of the client then we have to put it in now.
|
|
|
|
|
|
|
|
# Note that this is the first half of the sha512 hash.
|
|
|
|
privEncryptionKey = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(addressVersionNumber)
|
|
|
|
+ encodeVarint(streamNumber) + ripe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()[:32]
|
|
|
|
# Note that this is the second half of the sha512 hash.
|
|
|
|
tag = hashlib.sha512(hashlib.sha512(
|
2021-08-16 18:39:16 +02:00
|
|
|
encodeVarint(addressVersionNumber)
|
|
|
|
+ encodeVarint(streamNumber) + ripe
|
2017-09-21 17:24:51 +02:00
|
|
|
).digest()).digest()[32:]
|
2017-01-14 23:20:15 +01:00
|
|
|
if tag not in state.neededPubkeys:
|
2017-09-21 17:24:51 +02:00
|
|
|
# We'll need this for when we receive a pubkey reply:
|
|
|
|
# it will be encrypted and we'll need to decrypt it.
|
|
|
|
state.neededPubkeys[tag] = (
|
|
|
|
toAddress,
|
|
|
|
highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
|
|
|
|
)
|
|
|
|
|
|
|
|
# 2.5 days. This was chosen fairly arbitrarily.
|
|
|
|
TTL = 2.5 * 24 * 60 * 60
|
|
|
|
TTL *= 2 ** retryNumber
|
|
|
|
if TTL > 28 * 24 * 60 * 60:
|
|
|
|
TTL = 28 * 24 * 60 * 60
|
|
|
|
# add some randomness to the TTL
|
|
|
|
TTL = TTL + helper_random.randomrandrange(-300, 300)
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2014-08-27 09:14:32 +02:00
|
|
|
payload = pack('>Q', embeddedTime)
|
2017-09-21 17:24:51 +02:00
|
|
|
payload += '\x00\x00\x00\x00' # object type: getpubkey
|
2013-06-21 23:32:22 +02:00
|
|
|
payload += encodeVarint(addressVersionNumber)
|
|
|
|
payload += encodeVarint(streamNumber)
|
2013-09-13 06:27:34 +02:00
|
|
|
if addressVersionNumber <= 3:
|
|
|
|
payload += ripe
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'making request for pubkey with ripe: %s', hexlify(ripe))
|
2013-09-13 06:27:34 +02:00
|
|
|
else:
|
2013-09-15 03:06:26 +02:00
|
|
|
payload += tag
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info(
|
2017-09-21 17:24:51 +02:00
|
|
|
'making request for v4 pubkey with tag: %s', hexlify(tag))
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
statusbar = 'Doing the computations necessary to request' +\
|
|
|
|
' the recipient\'s public key.'
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put(('updateStatusBar', statusbar))
|
2017-09-21 17:24:51 +02:00
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByToAddress', (
|
|
|
|
toAddress,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Doing work necessary to request encryption key."))
|
|
|
|
))
|
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(payload, TTL)
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2013-06-21 23:32:22 +02:00
|
|
|
inventoryHash = calculateInventoryHash(payload)
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType = 1
|
2017-01-10 21:15:35 +01:00
|
|
|
Inventory()[inventoryHash] = (
|
2014-08-27 09:14:32 +02:00
|
|
|
objectType, streamNumber, payload, embeddedTime, '')
|
2019-08-06 13:04:33 +02:00
|
|
|
self.logger.info('sending inv (for the getpubkey message)')
|
2017-08-09 17:36:52 +02:00
|
|
|
queues.invQueue.put((streamNumber, inventoryHash))
|
2017-09-21 17:24:51 +02:00
|
|
|
|
2017-02-25 23:40:37 +01:00
|
|
|
# wait 10% past expiration
|
|
|
|
sleeptill = int(time.time() + TTL * 1.1)
|
2013-08-29 13:27:30 +02:00
|
|
|
sqlExecute(
|
2017-09-21 17:24:51 +02:00
|
|
|
'''UPDATE sent SET lastactiontime=?, '''
|
|
|
|
''' status='awaitingpubkey', retrynumber=?, sleeptill=? '''
|
|
|
|
''' WHERE toaddress=? AND (status='doingpubkeypow' OR '''
|
2020-09-19 13:58:23 +02:00
|
|
|
''' status='awaitingpubkey') AND folder='sent' ''',
|
2017-09-21 17:24:51 +02:00
|
|
|
int(time.time()), retryNumber + 1, sleeptill, toAddress)
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2017-02-08 13:41:56 +01:00
|
|
|
queues.UISignalQueue.put((
|
2017-09-21 17:24:51 +02:00
|
|
|
'updateStatusBar',
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Broadcasting the public key request. This program will"
|
|
|
|
" auto-retry if they are offline.")
|
|
|
|
))
|
|
|
|
queues.UISignalQueue.put((
|
|
|
|
'updateSentItemStatusByToAddress', (
|
|
|
|
toAddress,
|
|
|
|
tr._translate(
|
|
|
|
"MainWindow",
|
|
|
|
"Sending public key request. Waiting for reply."
|
|
|
|
" Requested at %1"
|
|
|
|
).arg(l10n.formatTimestamp()))
|
|
|
|
))
|
2013-06-21 23:32:22 +02:00
|
|
|
|
2018-10-10 12:49:48 +02:00
|
|
|
def generateFullAckMessage(self, ackdata, _, TTL):
|
|
|
|
"""
|
|
|
|
It might be perfectly fine to just use the same TTL for the ackdata that we use for the message. But I would
|
|
|
|
rather it be more difficult for attackers to associate ackData with the associated msg object. However, users
|
|
|
|
would want the TTL of the acknowledgement to be about the same as they set for the message itself. So let's set
|
|
|
|
the TTL of the acknowledgement to be in one of three 'buckets': 1 hour, 7 days, or 28 days, whichever is
|
|
|
|
relatively close to what the user specified.
|
|
|
|
"""
|
2017-09-21 17:24:51 +02:00
|
|
|
if TTL < 24 * 60 * 60: # 1 day
|
|
|
|
TTL = 24 * 60 * 60 # 1 day
|
|
|
|
elif TTL < 7 * 24 * 60 * 60: # 1 week
|
|
|
|
TTL = 7 * 24 * 60 * 60 # 1 week
|
2015-03-09 07:35:32 +01:00
|
|
|
else:
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = 28 * 24 * 60 * 60 # 4 weeks
|
2018-03-21 12:52:23 +01:00
|
|
|
# Add some randomness to the TTL
|
2017-09-21 17:24:51 +02:00
|
|
|
TTL = int(TTL + helper_random.randomrandrange(-300, 300))
|
2014-11-13 22:32:31 +01:00
|
|
|
embeddedTime = int(time.time() + TTL)
|
2017-09-30 11:19:44 +02:00
|
|
|
|
2017-09-21 17:24:51 +02:00
|
|
|
# type/version/stream already included
|
2017-09-30 11:19:44 +02:00
|
|
|
payload = pack('>Q', (embeddedTime)) + ackdata
|
|
|
|
|
2017-09-21 17:29:32 +02:00
|
|
|
payload = self._doPOWDefaults(
|
|
|
|
payload, TTL, log_prefix='(For ack message)', log_time=True)
|
2013-06-29 19:29:35 +02:00
|
|
|
|
2017-01-11 14:27:19 +01:00
|
|
|
return protocol.CreatePacket('object', payload)
|