- moved the knownnodes cleanup into the knownnodes module,
  - added a check that, when only one node is left in a stream, initiates DNS-based bootstrap.
This commit is contained in:
Dmitri Bogomolov 2018-10-03 16:07:06 +03:00
parent 4c184d8ffe
commit e417b6257f
Signed by untrusted user: g1itch
GPG Key ID: 720A756F18DEED13
2 changed files with 69 additions and 54 deletions

View File

@ -55,7 +55,7 @@ class singleCleaner(threading.Thread, StoppableThread):
) + ( ) + (
float(BMConfigParser().get( float(BMConfigParser().get(
'bitmessagesettings', 'stopresendingafterxmonths')) * 'bitmessagesettings', 'stopresendingafterxmonths')) *
(60 * 60 * 24 * 365)/12) (60 * 60 * 24 * 365) / 12)
except: except:
# Either the user hasn't set stopresendingafterxdays and # Either the user hasn't set stopresendingafterxdays and
# stopresendingafterxmonths yet or the options are missing # stopresendingafterxmonths yet or the options are missing
@ -96,9 +96,8 @@ class singleCleaner(threading.Thread, StoppableThread):
"SELECT toaddress, ackdata, status FROM sent" "SELECT toaddress, ackdata, status FROM sent"
" WHERE ((status='awaitingpubkey' OR status='msgsent')" " WHERE ((status='awaitingpubkey' OR status='msgsent')"
" AND folder='sent' AND sleeptill<? AND senttime>?)", " AND folder='sent' AND sleeptill<? AND senttime>?)",
int(time.time()), int(time.time()), int(time.time()) -
int(time.time()) shared.maximumLengthOfTimeToBotherResendingMessages
- shared.maximumLengthOfTimeToBotherResendingMessages
) )
for row in queryreturn: for row in queryreturn:
if len(row) < 2: if len(row) < 2:
@ -115,40 +114,15 @@ class singleCleaner(threading.Thread, StoppableThread):
elif status == 'msgsent': elif status == 'msgsent':
resendMsg(ackData) resendMsg(ackData)
# cleanup old nodes
now = int(time.time())
with knownnodes.knownNodesLock:
for stream in knownnodes.knownNodes:
keys = knownnodes.knownNodes[stream].keys()
for node in keys:
try: try:
# scrap old nodes # Cleanup knownnodes and handle possible severe exception
if now - knownnodes.knownNodes[stream][node]["lastseen"] > 2419200: # 28 days # while writing it to disk
shared.needToWriteKnownNodesToDisk = True knownnodes.cleanupKnownNodes()
del knownnodes.knownNodes[stream][node]
continue
# scrap old nodes with low rating
if now - knownnodes.knownNodes[stream][node]["lastseen"] > 10800 and knownnodes.knownNodes[stream][node]["rating"] <= knownnodes.knownNodesForgetRating:
shared.needToWriteKnownNodesToDisk = True
del knownnodes.knownNodes[stream][node]
continue
except TypeError:
print "Error in %s" % node
keys = []
# Let us write out the knowNodes to disk
# if there is anything new to write out.
if shared.needToWriteKnownNodesToDisk:
try:
knownnodes.saveKnownNodes()
except Exception as err: except Exception as err:
if "Errno 28" in str(err): if "Errno 28" in str(err):
logger.fatal( logger.fatal(
'(while receiveDataThread' '(while writing knownnodes to disk)'
' knownnodes.needToWriteKnownNodesToDisk)' ' Alert: Your disk or data storage volume is full.'
' Alert: Your disk or data storage volume'
' is full. '
) )
queues.UISignalQueue.put(( queues.UISignalQueue.put((
'alert', 'alert',
@ -161,8 +135,7 @@ class singleCleaner(threading.Thread, StoppableThread):
)) ))
# FIXME redundant? # FIXME redundant?
if shared.daemon or not state.enableGUI: if shared.daemon or not state.enableGUI:
os._exit(0) os._exit(1)
shared.needToWriteKnownNodesToDisk = False
# # clear download queues # # clear download queues
# for thread in threading.enumerate(): # for thread in threading.enumerate():
@ -207,7 +180,8 @@ def resendPubkeyRequest(address):
queues.UISignalQueue.put(( queues.UISignalQueue.put((
'updateStatusBar', 'updateStatusBar',
'Doing work necessary to again attempt to request a public key...')) 'Doing work necessary to again attempt to request a public key...'
))
sqlExecute( sqlExecute(
'''UPDATE sent SET status='msgqueued' WHERE toaddress=?''', '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
address) address)

View File

@ -8,6 +8,7 @@ import time
import state import state
from bmconfigparser import BMConfigParser from bmconfigparser import BMConfigParser
from debug import logger from debug import logger
from helper_bootstrap import dns
knownNodesLock = threading.Lock() knownNodesLock = threading.Lock()
knownNodes = {stream: {} for stream in range(1, 4)} knownNodes = {stream: {} for stream in range(1, 4)}
@ -157,3 +158,43 @@ def trimKnownNodes(recAddrStream=1):
)[:knownNodesTrimAmount] )[:knownNodesTrimAmount]
for oldest in oldestList: for oldest in oldestList:
del knownNodes[recAddrStream][oldest] del knownNodes[recAddrStream][oldest]
def cleanupKnownNodes():
    """
    Cleanup knownnodes: remove old nodes and nodes with low rating.

    Holds ``knownNodesLock`` while scanning every stream's node dict.
    If a stream is down to its last node, triggers a one-shot DNS-based
    bootstrap (``dns()``) instead of pruning, so the stream is never
    emptied.  Persists the table via ``saveKnownNodes()`` only when at
    least one node was actually removed.
    """
    now = int(time.time())
    # Set to True as soon as any node is deleted; gates the final save.
    needToWriteKnownNodesToDisk = False
    # Ensures the DNS bootstrap is attempted at most once per cleanup run,
    # even if several streams are nearly empty.
    dns_done = False
    with knownNodesLock:
        for stream in knownNodes:
            # Python 2 dict.keys() returns a list snapshot, so deleting
            # entries from knownNodes[stream] inside the loop is safe.
            keys = knownNodes[stream].keys()
            if len(keys) <= 1 and not dns_done:  # leave at least one node
                dns()
                dns_done = True
                continue
            for node in keys:
                try:
                    # scrap old nodes
                    if (now - knownNodes[stream][node]["lastseen"] >
                            2419200):  # 28 days
                        needToWriteKnownNodesToDisk = True
                        del knownNodes[stream][node]
                        continue
                    # scrap old nodes with low rating
                    if (now - knownNodes[stream][node]["lastseen"] > 10800 and
                            knownNodes[stream][node]["rating"] <=
                            knownNodesForgetRating):
                        needToWriteKnownNodesToDisk = True
                        del knownNodes[stream][node]
                        continue
                except TypeError:
                    # NOTE(review): presumably a malformed node entry (e.g.
                    # "lastseen"/"rating" missing or non-numeric) — the entry
                    # is logged and skipped, not removed. Confirm intent.
                    logger.warning('Error in %s', node)
            # Drop the snapshot reference before moving to the next stream.
            keys = []

    # Let us write out the knownNodes to disk
    # if there is anything new to write out.
    if needToWriteKnownNodesToDisk:
        saveKnownNodes()