"""
The `singleCleaner` class is a timer-driven thread that cleans data structures
to free memory, resends messages when a remote node doesn't respond, and
sends pong messages to keep connections alive if the network isn't busy.

It cleans these data structures in memory:

- inventory (moves data to the on-disk sql database)
- inventorySets (clears then reloads data out of the sql database)

It cleans these tables on the disk:

- inventory (clears expired objects)
- pubkeys (clears pubkeys older than 4 weeks which we have not used
  personally)
- knownNodes (clears addresses which have not been online for over 3 days)

It resends messages when there has been no response:

- resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
- resends msg messages in 5 days (then 10 days, then 20 days, etc...)
"""
# pylint: disable=relative-import, protected-access
import gc
import os
import time
from datetime import datetime, timedelta

import knownnodes
import queues
import shared
import state
import tr
from bmconfigparser import BMConfigParser
from helper_sql import sqlQuery, sqlExecute
from inventory import Inventory
from network import BMConnectionPool, StoppableThread


class singleCleaner(StoppableThread):
    """The singleCleaner thread class"""
    name = "singleCleaner"
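    # Both intervals are in seconds: how often a cleaning pass runs, and how
    # long an entry may stay in state.discoveredPeers before it is dropped.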
    cycleLength = 300
    expireDiscoveredPeers = 300

    def run(self):  # pylint: disable=too-many-branches
        gc.disable()
        timeWeLastClearedInventoryAndPubkeysTables = 0
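        # Work out how long (in seconds) we keep resending before giving up,
        # from the 'stopresendingafterxdays' and 'stopresendingafterxmonths'
        # settings; a month is approximated as 1/12 of a 365-day year.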
        try:
            shared.maximumLengthOfTimeToBotherResendingMessages = (
                float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxdays'))
                * 24 * 60 * 60
            ) + (
                float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxmonths'))
                * (60 * 60 * 24 * 365) / 12)
        except:
            # Either the user hasn't set stopresendingafterxdays and
            # stopresendingafterxmonths yet or the options are missing
            # from the config file.
            shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')

        # initial wait
        if state.shutdown == 0:
            self.stop.wait(singleCleaner.cycleLength)

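        # Main housekeeping pass, repeated every cycleLength seconds until
        # shutdown is requested.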
        while state.shutdown == 0:
            queues.UISignalQueue.put((
                'updateStatusBar',
                'Doing housekeeping (Flushing inventory in memory to disk...)'
            ))
            Inventory().flush()
            queues.UISignalQueue.put(('updateStatusBar', ''))

            # If we are running as a daemon then we are going to fill up the
            # UI queue which will never be handled by a UI. We should clear
            # it to save memory.
            # ..FIXME redundant?
            if shared.thisapp.daemon or not state.enableGUI:
                queues.UISignalQueue.queue.clear()
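            # About every two hours (7380 seconds) also clean the on-disk
            # inventory and pubkeys tables and queue resends of messages
            # that are still waiting for a pubkey or an acknowledgement.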
            if timeWeLastClearedInventoryAndPubkeysTables < \
                    int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                Inventory().clean()
                queues.workerQueue.put(('sendOnionPeerObj', ''))
                # pubkeys
                sqlExecute(
                    "DELETE FROM pubkeys WHERE time<? AND usedpersonally='no'",
                    int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)

                # Let us resend getpubkey objects if we have not yet heard
                # a pubkey, and also msg objects if we have not yet heard
                # an acknowledgement
                queryreturn = sqlQuery(
                    "SELECT toaddress, ackdata, status FROM sent"
                    " WHERE ((status='awaitingpubkey' OR status='msgsent')"
                    " AND folder='sent' AND sleeptill<? AND senttime>?)",
                    int(time.time()), int(time.time())
                    - shared.maximumLengthOfTimeToBotherResendingMessages
                )
                for row in queryreturn:
                    if len(row) < 2:
                        self.logger.error(
                            'Something went wrong in the singleCleaner thread:'
                            ' a query did not return the requested fields. %r',
                            row
                        )
                        self.stop.wait(3)
                        break
                    toAddress, ackData, status = row
                    if status == 'awaitingpubkey':
                        self.resendPubkeyRequest(toAddress)
                    elif status == 'msgsent':
                        self.resendMsg(ackData)
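            # Permanently remove messages that have sat in the trash folder
            # for more than 30 days.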
            deleteTrashMsgPermanently()
            try:
                # Cleanup knownnodes and handle possible severe exception
                # while writing it to disk
                knownnodes.cleanupKnownNodes()
            except Exception as err:
                # pylint: disable=protected-access
                if "Errno 28" in str(err):
                    self.logger.fatal(
                        '(while writing knownnodes to disk)'
                        ' Alert: Your disk or data storage volume is full.'
                    )
                    queues.UISignalQueue.put((
                        'alert',
                        (tr._translate("MainWindow", "Disk full"),
                         tr._translate(
                             "MainWindow",
                             'Alert: Your disk or data storage volume'
                             ' is full. Bitmessage will now exit.'),
                         True)
                    ))
                    if shared.thisapp.daemon or not state.enableGUI:
                        os._exit(1)

            # inv/object tracking
            for connection in BMConnectionPool().connections():
                connection.clean()

            # discovery tracking
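            # Drop peers whose discovery timestamp is older than
            # expireDiscoveredPeers seconds; entries already removed
            # elsewhere are simply skipped.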
            exp = time.time() - singleCleaner.expireDiscoveredPeers
            reaper = (k for k, v in state.discoveredPeers.items() if v < exp)
            for k in reaper:
                try:
                    del state.discoveredPeers[k]
                except KeyError:
                    pass

            # ..todo:: cleanup pending upload / download

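            # Automatic garbage collection is disabled at the top of run();
            # collect manually once per cleaning pass instead.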
            gc.collect()

            if state.shutdown == 0:
                self.stop.wait(singleCleaner.cycleLength)

    def resendPubkeyRequest(self, address):
        """Resend pubkey request for address"""
        self.logger.debug(
            'It has been a long time and we haven\'t heard a response to our'
            ' getpubkey request. Sending again.'
        )
        try:
            # We need to take this entry out of the neededPubkeys structure
            # because the queues.workerQueue checks to see whether the entry
            # is already present and will not do the POW and send the message
            # because it assumes that it has already done it recently.
            del state.neededPubkeys[address]
        except:
            pass

        queues.UISignalQueue.put((
            'updateStatusBar',
            'Doing work necessary to again attempt to request a public key...'
        ))
        sqlExecute(
            '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
            address)
        queues.workerQueue.put(('sendmessage', ''))

    def resendMsg(self, ackdata):
        """Resend message by ackdata"""
        self.logger.debug(
            'It has been a long time and we haven\'t heard an acknowledgement'
            ' to our msg. Sending again.'
        )
        sqlExecute(
            '''UPDATE sent SET status='msgqueued' WHERE ackdata=?''',
            ackdata)
        queues.workerQueue.put(('sendmessage', ''))
        queues.UISignalQueue.put((
            'updateStatusBar',
            'Doing work necessary to again attempt to deliver a message...'
        ))


def deleteTrashMsgPermanently():
    """Permanently delete messages that have been in the trash folder
    for more than 30 days
    """
    ndays_before_time = datetime.now() - timedelta(days=30)
    old_messages = time.mktime(ndays_before_time.timetuple())
    sqlExecute(
        "DELETE FROM sent WHERE folder = 'trash' AND lastactiontime <= ?;",
        int(old_messages))
    sqlExecute(
        "DELETE FROM inbox WHERE folder = 'trash' AND received <= ?;",
        int(old_messages))