knownNodes refactoring and shutdown fixes
- saveKnownNodes() replaces the repeated pickle.dump calls
- knownNodesLock is now used as a context manager ("with") instead of explicit acquire/release
- outgoingSynSender had an unnecessary loop during shutdown, causing excessive CPU usage / GUI freezing
parent e664746f04
commit c778b81427
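The second bullet is the pattern most of the hunks below follow: explicit acquire()/release() pairs are replaced with the lock's context-manager form, which also releases the lock if the body raises. A minimal, self-contained sketch of the before/after shape (the stream and peer values are placeholders, not taken from the diff):

    import threading
    import time

    knownNodesLock = threading.Lock()
    knownNodes = {1: {}}                    # stream number -> {peer: last-seen timestamp}
    stream, peer = 1, ('127.0.0.1', 8444)   # placeholder peer

    # before: explicit acquire/release; an exception between the two calls
    # would leave the lock held
    knownNodesLock.acquire()
    knownNodes[stream][peer] = time.time()
    knownNodesLock.release()

    # after: the context manager releases the lock even if the body raises
    with knownNodesLock:
        knownNodes[stream][peer] = time.time()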
@@ -76,7 +76,6 @@ from collections import OrderedDict
 from account import *
 from class_objectHashHolder import objectHashHolder
 from class_singleWorker import singleWorker
 import defaults
 from dialogs import AddAddressDialog
 from helper_generic import powQueueSize
 from inventory import PendingDownload, PendingUpload, PendingUploadDeadlineException
@@ -2507,11 +2506,7 @@ class MyForm(settingsmixin.SMainWindow):
 with open(paths.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
 BMConfigParser().write(configfile)
 # Write the knownnodes.dat file to disk in the new location
-knownnodes.knownNodesLock.acquire()
-output = open(paths.lookupExeFolder() + 'knownnodes.dat', 'wb')
-pickle.dump(knownnodes.knownNodes, output)
-output.close()
-knownnodes.knownNodesLock.release()
+knownnodes.saveKnownNodes(paths.lookupExeFolder())
 os.remove(state.appdata + 'keys.dat')
 os.remove(state.appdata + 'knownnodes.dat')
 previousAppdataLocation = state.appdata
@@ -2531,11 +2526,7 @@ class MyForm(settingsmixin.SMainWindow):
 # Write the keys.dat file to disk in the new location
 BMConfigParser().save()
 # Write the knownnodes.dat file to disk in the new location
-knownnodes.knownNodesLock.acquire()
-output = open(state.appdata + 'knownnodes.dat', 'wb')
-pickle.dump(knownnodes.knownNodes, output)
-output.close()
-knownnodes.knownNodesLock.release()
+knownnodes.saveKnownNodes(state.appdata)
 os.remove(paths.lookupExeFolder() + 'keys.dat')
 os.remove(paths.lookupExeFolder() + 'knownnodes.dat')
 debug.restartLoggingInUpdatedAppdataLocation()
@@ -35,21 +35,18 @@ class outgoingSynSender(threading.Thread, StoppableThread):
 # ever connect to that. Otherwise we'll pick a random one from
 # the known nodes
 if state.trustedPeer:
-knownnodes.knownNodesLock.acquire()
+with knownnodes.knownNodesLock:
 peer = state.trustedPeer
 knownnodes.knownNodes[self.streamNumber][peer] = time.time()
-knownnodes.knownNodesLock.release()
 else:
-while not state.shutdown:
-knownnodes.knownNodesLock.acquire()
+while not self._stopped:
+with knownnodes.knownNodesLock:
 try:
 peer, = random.sample(knownnodes.knownNodes[self.streamNumber], 1)
 except ValueError: # no known nodes
-knownnodes.knownNodesLock.release()
 self.stop.wait(1)
 continue
 priority = (183600 - (time.time() - knownnodes.knownNodes[self.streamNumber][peer])) / 183600 # 2 days and 3 hours
-knownnodes.knownNodesLock.release()
 if BMConfigParser().get('bitmessagesettings', 'socksproxytype') != 'none':
 if peer.host.find(".onion") == -1:
 priority /= 10 # hidden services have 10x priority over plain net
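A condensed sketch of the peer-selection loop after this change, showing why it no longer spins during shutdown: the loop exits once the stop flag is set, sleeps on the stop event when there are no known nodes, and holds the lock only inside the with block. This is simplified and self-contained; the names follow the diff, but the peer is reduced to a plain tuple:

    import random
    import threading
    import time

    knownNodesLock = threading.Lock()
    knownNodes = {1: {}}          # stream -> {peer: last-seen timestamp}
    stop = threading.Event()      # set by stopThread() at shutdown

    def pick_peer(stream):
        """Simplified stand-in for the _getPeer() loop above."""
        while not stop.is_set():
            with knownNodesLock:
                try:
                    peer, = random.sample(list(knownNodes[stream]), 1)
                    age = time.time() - knownNodes[stream][peer]
                except ValueError:          # no known nodes for this stream yet
                    peer = None
            if peer is None:
                stop.wait(1)                # sleep, but wake immediately on shutdown
                continue
            # more recently seen peers get a higher priority
            # (183600 s = 2 days and 3 hours, as in the diff)
            priority = (183600 - age) / 183600.0
            return peer, priority
        return None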
@@ -82,7 +79,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
 while BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections') and not self._stopped:
 self.name = "outgoingSynSender"
 maximumConnections = 1 if state.trustedPeer else 8 # maximum number of outgoing connections = 8
-while len(self.selfInitiatedConnections[self.streamNumber]) >= maximumConnections:
+while len(self.selfInitiatedConnections[self.streamNumber]) >= maximumConnections and not self._stopped:
 self.stop.wait(10)
 if state.shutdown:
 break
@@ -95,7 +92,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
 random.seed()
 peer = self._getPeer()
 self.stop.wait(1)
-if state.shutdown:
+if self._stopped:
 break
 # Clear out the shared.alreadyAttemptedConnectionsList every half
 # hour so that this program will again attempt a connection
@@ -110,7 +107,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
 shared.alreadyAttemptedConnectionsListLock.release()
 except threading.ThreadError as e:
 pass
-if state.shutdown:
+if self._stopped:
 break
 self.name = "outgoingSynSender-" + peer.host.replace(":", ".") # log parser field separator
 address_family = socket.AF_INET
@@ -133,12 +130,11 @@ class outgoingSynSender(threading.Thread, StoppableThread):

 So let us remove the offending address from our knownNodes file.
 """
-knownnodes.knownNodesLock.acquire()
+with knownnodes.knownNodesLock:
 try:
 del knownnodes.knownNodes[self.streamNumber][peer]
 except:
 pass
-knownnodes.knownNodesLock.release()
 logger.debug('deleting ' + str(peer) + ' from knownnodes.knownNodes because it caused a socks.socksocket exception. We must not be 64-bit compatible.')
 continue
 # This option apparently avoids the TIME_WAIT state so that we
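The shutdown fixes in this file rely on the stop event provided by StoppableThread: loops wait on self.stop with a timeout and test self._stopped instead of only polling state.shutdown, so a sleeping thread wakes as soon as stopThread() is called. A minimal sketch of that thread shape (an assumed simplification, not the project's actual helper_threading code):

    import threading

    class StoppableThread(object):
        """Mixin providing a stop event that worker loops can wait on."""
        def initStop(self):
            self.stop = threading.Event()
            self._stopped = False

        def stopThread(self):
            self._stopped = True
            self.stop.set()           # wakes any pending self.stop.wait(timeout)

    class Worker(threading.Thread, StoppableThread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.initStop()

        def run(self):
            while not self._stopped:
                # ... one unit of work ...
                self.stop.wait(10)    # sleeps up to 10 s, returns early once stopped

With this shape, the "and not self._stopped" loop conditions and the self.stop.wait(...) calls in the hunks above let outgoingSynSender leave its inner loops promptly at shutdown instead of re-checking a flag in a tight cycle.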
@@ -3,7 +3,6 @@ import shared
 import time
 import sys
 import os
-import pickle

 import tr#anslate
 from configparser import BMConfigParser
@@ -90,28 +89,23 @@ class singleCleaner(threading.Thread, StoppableThread):
 # cleanup old nodes
 now = int(time.time())
 toDelete = []
-knownnodes.knownNodesLock.acquire()
+with knownnodes.knownNodesLock:
 for stream in knownnodes.knownNodes:
 for node in knownnodes.knownNodes[stream].keys():
 if now - knownnodes.knownNodes[stream][node] > 2419200: # 28 days
 shared.needToWriteKownNodesToDisk = True
 del knownnodes.knownNodes[stream][node]
-knownnodes.knownNodesLock.release()

 # Let us write out the knowNodes to disk if there is anything new to write out.
 if shared.needToWriteKnownNodesToDisk:
-knownnodes.knownNodesLock.acquire()
-output = open(state.appdata + 'knownnodes.dat', 'wb')
 try:
-pickle.dump(knownnodes.knownNodes, output)
-output.close()
+knownnodes.saveKnownNodes()
 except Exception as err:
 if "Errno 28" in str(err):
 logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ')
 queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
 if shared.daemon:
 os._exit(0)
-knownnodes.knownNodesLock.release()
 shared.needToWriteKnownNodesToDisk = False

 # TODO: cleanup pending upload / download
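The cleaner keeps its disk-full handling around the new knownnodes.saveKnownNodes() call by matching "Errno 28" in the exception text; errno 28 is ENOSPC ("No space left on device"). A hedged sketch of an equivalent check against the errno code rather than the message string (an illustrative alternative, not what the commit does):

    import errno

    def save_with_disk_full_check(save, on_disk_full):
        """Run save(); treat ENOSPC (errno 28, disk full) as a special case."""
        try:
            save()
        except EnvironmentError as err:        # IOError / OSError
            if err.errno == errno.ENOSPC:      # "No space left on device"
                on_disk_full()
            else:
                raise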
@@ -1,5 +1,14 @@
+import pickle
 import threading

+import state
+
 knownNodesLock = threading.Lock()
 knownNodes = {}
+
+def saveKnownNodes(dirName = None):
+    if dirName is None:
+        dirName = state.appdata
+    with knownNodesLock:
+        with open(dirName + 'knownnodes.dat', 'wb') as output:
+            pickle.dump(knownNodes, output)
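The helper writes knownnodes.dat into the given directory and defaults to state.appdata; the MyForm hunks pass the target folder explicitly, while singleCleaner and shutdown rely on the default. Usage as implied by the call sites in this commit:

    import knownnodes
    import paths
    import state

    # rely on the default location (singleCleaner / shutdown call sites)
    knownnodes.saveKnownNodes()                  # writes state.appdata + 'knownnodes.dat'

    # write to an explicit folder (the MyForm data-relocation call sites)
    knownnodes.saveKnownNodes(paths.lookupExeFolder())
    knownnodes.saveKnownNodes(state.appdata)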
@@ -1,5 +1,4 @@
 import os
-import pickle
 import Queue
 import threading
 import time
@@ -10,7 +9,7 @@ from configparser import BMConfigParser
 from debug import logger
 from helper_sql import sqlQuery, sqlStoredProcedure
 from helper_threading import StoppableThread
-from knownnodes import knownNodes, knownNodesLock
+from knownnodes import saveKnownNodes
 from inventory import Inventory
 import protocol
 from queues import addressGeneratorQueue, objectProcessorQueue, parserInputQueue, UISignalQueue, workerQueue
@@ -29,17 +28,11 @@ def doCleanShutdown():
 if thread.isAlive() and isinstance(thread, StoppableThread):
 thread.stopThread()

-knownNodesLock.acquire()
 UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
-output = open(state.appdata + 'knownnodes.dat', 'wb')
-logger.info('finished opening knownnodes.dat. Now pickle.dump')
-pickle.dump(knownNodes, output)
-logger.info('Completed pickle.dump. Closing output...')
-output.close()
-knownNodesLock.release()
-logger.info('Finished closing knownnodes.dat output file.')
+logger.info('Saving knownNodes list of peers to disk')
+saveKnownNodes()
+logger.info('Done saving knownNodes list of peers to disk')
 UISignalQueue.put(('updateStatusBar','Done saving the knownNodes list of peers to disk.'))

 logger.info('Flushing inventory in memory out to disk...')
 UISignalQueue.put((
 'updateStatusBar',
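Condensed, the new doCleanShutdown flow for the peer list is: signal every StoppableThread, then write knownnodes.dat once through the shared helper. A compressed sketch of that sequence (simplified from the hunk above; imports as in the diff):

    import threading

    from debug import logger
    from helper_threading import StoppableThread
    from knownnodes import saveKnownNodes

    def save_peers_on_shutdown():
        # ask every stoppable thread to finish; this wakes their stop.wait() calls
        for thread in threading.enumerate():
            if thread.isAlive() and isinstance(thread, StoppableThread):
                thread.stopThread()
        # then persist the peer list once, through the shared helper
        logger.info('Saving knownNodes list of peers to disk')
        saveKnownNodes()
        logger.info('Done saving knownNodes list of peers to disk')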