Fixes #1335:
- moved knownnodes cleanup to the knownnodes module
- added a check that initiates DNS-based bootstrap when a stream is down to its last known node
parent 4c184d8ffe
commit e417b6257f
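What the patch below boils down to: the inline node-expiry loop in singleCleaner.run() is replaced by a single call to knownnodes.cleanupKnownNodes(), and only the disk-full handling stays at the call site. A minimal sketch of that call shape, using stand-in stubs rather than the real PyBitmessage modules (cleanupKnownNodes and the "Errno 28" check come from the hunks below; report_disk_full is a hypothetical placeholder for the logger/UISignalQueue alert):

    # Sketch only -- stand-in stubs, not the real PyBitmessage modules.
    def cleanupKnownNodes():
        """Stand-in for knownnodes.cleanupKnownNodes() (see the last hunk)."""


    def report_disk_full():
        """Placeholder for the logger.fatal() + UISignalQueue alert in the diff."""
        print('Alert: Your disk or data storage volume is full.')


    try:
        # Cleanup knownnodes and handle possible severe exception
        # while writing it to disk
        cleanupKnownNodes()
    except Exception as err:
        if "Errno 28" in str(err):  # ENOSPC: no space left on device
            report_disk_full()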
@@ -55,7 +55,7 @@ class singleCleaner(threading.Thread, StoppableThread):
             ) + (
                 float(BMConfigParser().get(
                     'bitmessagesettings', 'stopresendingafterxmonths')) *
-                (60 * 60 * 24 * 365)/12)
+                (60 * 60 * 24 * 365) / 12)
         except:
             # Either the user hasn't set stopresendingafterxdays and
             # stopresendingafterxmonths yet or the options are missing
@@ -96,9 +96,8 @@ class singleCleaner(threading.Thread, StoppableThread):
                 "SELECT toaddress, ackdata, status FROM sent"
                 " WHERE ((status='awaitingpubkey' OR status='msgsent')"
                 " AND folder='sent' AND sleeptill<? AND senttime>?)",
-                int(time.time()),
-                int(time.time())
-                - shared.maximumLengthOfTimeToBotherResendingMessages
+                int(time.time()), int(time.time()) -
+                shared.maximumLengthOfTimeToBotherResendingMessages
             )
             for row in queryreturn:
                 if len(row) < 2:
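Side note on the query above: senttime is compared against now minus shared.maximumLengthOfTimeToBotherResendingMessages, the value computed in the first hunk from the stopresendingafterxdays and stopresendingafterxmonths settings. A standalone sketch of that cutoff; the plain numbers stand in for BMConfigParser() values, and the xdays term is an assumption based on the surrounding code and comment, not part of the hunk:

    import time

    stopresendingafterxdays = 30       # hypothetical example values,
    stopresendingafterxmonths = 2      # normally read via BMConfigParser()

    maximumLengthOfTimeToBotherResendingMessages = (
        float(stopresendingafterxdays) * 24 * 60 * 60
    ) + (
        float(stopresendingafterxmonths) * (60 * 60 * 24 * 365) / 12)

    # The SELECT above only retries messages whose senttime is newer than this:
    cutoff = int(time.time()) - maximumLengthOfTimeToBotherResendingMessages
    print('resend anything sent after %d' % cutoff)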
@@ -115,54 +114,28 @@ class singleCleaner(threading.Thread, StoppableThread):
                 elif status == 'msgsent':
                     resendMsg(ackData)

-            # cleanup old nodes
-            now = int(time.time())
-            with knownnodes.knownNodesLock:
-                for stream in knownnodes.knownNodes:
-                    keys = knownnodes.knownNodes[stream].keys()
-                    for node in keys:
-                        try:
-                            # scrap old nodes
-                            if now - knownnodes.knownNodes[stream][node]["lastseen"] > 2419200:  # 28 days
-                                shared.needToWriteKnownNodesToDisk = True
-                                del knownnodes.knownNodes[stream][node]
-                                continue
-                            # scrap old nodes with low rating
-                            if now - knownnodes.knownNodes[stream][node]["lastseen"] > 10800 and knownnodes.knownNodes[stream][node]["rating"] <= knownnodes.knownNodesForgetRating:
-                                shared.needToWriteKnownNodesToDisk = True
-                                del knownnodes.knownNodes[stream][node]
-                                continue
-                        except TypeError:
-                            print "Error in %s" % node
-                    keys = []
-
-            # Let us write out the knowNodes to disk
-            # if there is anything new to write out.
-            if shared.needToWriteKnownNodesToDisk:
-                try:
-                    knownnodes.saveKnownNodes()
-                except Exception as err:
-                    if "Errno 28" in str(err):
-                        logger.fatal(
-                            '(while receiveDataThread'
-                            ' knownnodes.needToWriteKnownNodesToDisk)'
-                            ' Alert: Your disk or data storage volume'
-                            ' is full. '
-                        )
-                        queues.UISignalQueue.put((
-                            'alert',
-                            (tr._translate("MainWindow", "Disk full"),
-                             tr._translate(
-                                "MainWindow",
-                                'Alert: Your disk or data storage volume'
-                                ' is full. Bitmessage will now exit.'),
-                             True)
-                        ))
-                        # FIXME redundant?
-                        if shared.daemon or not state.enableGUI:
-                            os._exit(0)
-                shared.needToWriteKnownNodesToDisk = False
+            try:
+                # Cleanup knownnodes and handle possible severe exception
+                # while writing it to disk
+                knownnodes.cleanupKnownNodes()
+            except Exception as err:
+                if "Errno 28" in str(err):
+                    logger.fatal(
+                        '(while writing knownnodes to disk)'
+                        ' Alert: Your disk or data storage volume is full.'
+                    )
+                    queues.UISignalQueue.put((
+                        'alert',
+                        (tr._translate("MainWindow", "Disk full"),
+                         tr._translate(
+                            "MainWindow",
+                            'Alert: Your disk or data storage volume'
+                            ' is full. Bitmessage will now exit.'),
+                         True)
+                    ))
+                    # FIXME redundant?
+                    if shared.daemon or not state.enableGUI:
+                        os._exit(1)

             # # clear download queues
             # for thread in threading.enumerate():
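The new except branch in the hunk above matches the substring "Errno 28" (ENOSPC, disk full) in the stringified exception rather than inspecting err.errno, just as the pre-existing save path did. A small illustration of why that string test works (not part of the patch):

    import errno
    import os

    err = IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
    print(str(err))                  # [Errno 28] No space left on device
    print("Errno 28" in str(err))    # True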
@@ -206,8 +179,9 @@ def resendPubkeyRequest(address):
         pass

     queues.UISignalQueue.put((
         'updateStatusBar',
-        'Doing work necessary to again attempt to request a public key...'))
+        'Doing work necessary to again attempt to request a public key...'
+    ))
     sqlExecute(
         '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
         address)
@@ -8,6 +8,7 @@ import time
 import state
 from bmconfigparser import BMConfigParser
 from debug import logger
+from helper_bootstrap import dns

 knownNodesLock = threading.Lock()
 knownNodes = {stream: {} for stream in range(1, 4)}
@@ -157,3 +158,43 @@ def trimKnownNodes(recAddrStream=1):
         )[:knownNodesTrimAmount]
         for oldest in oldestList:
             del knownNodes[recAddrStream][oldest]
+
+
+def cleanupKnownNodes():
+    """
+    Cleanup knownnodes: remove old nodes and nodes with low rating
+    """
+    now = int(time.time())
+    needToWriteKnownNodesToDisk = False
+    dns_done = False
+
+    with knownNodesLock:
+        for stream in knownNodes:
+            keys = knownNodes[stream].keys()
+            if len(keys) <= 1 and not dns_done:  # leave at least one node
+                dns()
+                dns_done = True
+                continue
+            for node in keys:
+                try:
+                    # scrap old nodes
+                    if (now - knownNodes[stream][node]["lastseen"] >
+                            2419200):  # 28 days
+                        needToWriteKnownNodesToDisk = True
+                        del knownNodes[stream][node]
+                        continue
+                    # scrap old nodes with low rating
+                    if (now - knownNodes[stream][node]["lastseen"] > 10800 and
+                            knownNodes[stream][node]["rating"] <=
+                            knownNodesForgetRating):
+                        needToWriteKnownNodesToDisk = True
+                        del knownNodes[stream][node]
+                        continue
+                except TypeError:
+                    logger.warning('Error in %s', node)
+            keys = []
+
+    # Let us write out the knowNodes to disk
+    # if there is anything new to write out.
+    if needToWriteKnownNodesToDisk:
+        saveKnownNodes()
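A toy, self-contained rerun of the pruning rules added above, mainly to show the new "last node in stream" fallback that kicks off DNS bootstrap. Here dns() is a stub (the real one is imported from helper_bootstrap), the knownNodesForgetRating value is a stand-in, and the real function also sets dns_done so the bootstrap fires at most once per pass:

    import time


    def dns():
        print('DNS bootstrap triggered')


    knownNodesForgetRating = -0.5    # stand-in threshold
    now = int(time.time())
    knownNodes = {
        1: {'node-a': {'lastseen': now - 30 * 24 * 60 * 60, 'rating': 1.0},  # > 28 days old
            'node-b': {'lastseen': now - 4 * 60 * 60, 'rating': -1.0},       # stale and low rating
            'node-c': {'lastseen': now, 'rating': 1.0}},                     # kept
        2: {'node-d': {'lastseen': now, 'rating': 1.0}},                     # last node in stream
    }

    for stream in knownNodes:
        keys = list(knownNodes[stream].keys())
        if len(keys) <= 1:                   # leave at least one node
            dns()                            # stream 2 triggers the bootstrap
            continue
        for node in keys:
            age = now - knownNodes[stream][node]['lastseen']
            if age > 2419200:                # 28 days
                del knownNodes[stream][node]
            elif age > 10800 and knownNodes[stream][node]['rating'] <= knownNodesForgetRating:
                del knownNodes[stream][node]

    print(sorted(knownNodes[1]))             # ['node-c']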