knownNodes refactoring and shutdown fixes

- saveKnownNodes replaces the repeated inline pickle.dump calls
- use `with knownNodesLock:` instead of manual acquire/release
- outgoingSynSender had an unnecessary loop during shutdown causing
  excessive CPU usage / GUI freezing
This commit is contained in:
Peter Šurda 2017-02-09 11:53:33 +01:00
parent e664746f04
commit c778b81427
Signed by: PeterSurda
GPG Key ID: 0C5F50C0B5F37D87
5 changed files with 41 additions and 58 deletions

View File

@ -76,7 +76,6 @@ from collections import OrderedDict
from account import * from account import *
from class_objectHashHolder import objectHashHolder from class_objectHashHolder import objectHashHolder
from class_singleWorker import singleWorker from class_singleWorker import singleWorker
import defaults
from dialogs import AddAddressDialog from dialogs import AddAddressDialog
from helper_generic import powQueueSize from helper_generic import powQueueSize
from inventory import PendingDownload, PendingUpload, PendingUploadDeadlineException from inventory import PendingDownload, PendingUpload, PendingUploadDeadlineException
@ -2507,11 +2506,7 @@ class MyForm(settingsmixin.SMainWindow):
with open(paths.lookupExeFolder() + 'keys.dat', 'wb') as configfile: with open(paths.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
BMConfigParser().write(configfile) BMConfigParser().write(configfile)
# Write the knownnodes.dat file to disk in the new location # Write the knownnodes.dat file to disk in the new location
knownnodes.knownNodesLock.acquire() knownnodes.saveKnownNodes(paths.lookupExeFolder())
output = open(paths.lookupExeFolder() + 'knownnodes.dat', 'wb')
pickle.dump(knownnodes.knownNodes, output)
output.close()
knownnodes.knownNodesLock.release()
os.remove(state.appdata + 'keys.dat') os.remove(state.appdata + 'keys.dat')
os.remove(state.appdata + 'knownnodes.dat') os.remove(state.appdata + 'knownnodes.dat')
previousAppdataLocation = state.appdata previousAppdataLocation = state.appdata
@ -2531,11 +2526,7 @@ class MyForm(settingsmixin.SMainWindow):
# Write the keys.dat file to disk in the new location # Write the keys.dat file to disk in the new location
BMConfigParser().save() BMConfigParser().save()
# Write the knownnodes.dat file to disk in the new location # Write the knownnodes.dat file to disk in the new location
knownnodes.knownNodesLock.acquire() knownnodes.saveKnownNodes(state.appdata)
output = open(state.appdata + 'knownnodes.dat', 'wb')
pickle.dump(knownnodes.knownNodes, output)
output.close()
knownnodes.knownNodesLock.release()
os.remove(paths.lookupExeFolder() + 'keys.dat') os.remove(paths.lookupExeFolder() + 'keys.dat')
os.remove(paths.lookupExeFolder() + 'knownnodes.dat') os.remove(paths.lookupExeFolder() + 'knownnodes.dat')
debug.restartLoggingInUpdatedAppdataLocation() debug.restartLoggingInUpdatedAppdataLocation()

View File

@ -35,21 +35,18 @@ class outgoingSynSender(threading.Thread, StoppableThread):
# ever connect to that. Otherwise we'll pick a random one from # ever connect to that. Otherwise we'll pick a random one from
# the known nodes # the known nodes
if state.trustedPeer: if state.trustedPeer:
knownnodes.knownNodesLock.acquire() with knownnodes.knownNodesLock:
peer = state.trustedPeer peer = state.trustedPeer
knownnodes.knownNodes[self.streamNumber][peer] = time.time() knownnodes.knownNodes[self.streamNumber][peer] = time.time()
knownnodes.knownNodesLock.release()
else: else:
while not state.shutdown: while not self._stopped:
knownnodes.knownNodesLock.acquire() with knownnodes.knownNodesLock:
try: try:
peer, = random.sample(knownnodes.knownNodes[self.streamNumber], 1) peer, = random.sample(knownnodes.knownNodes[self.streamNumber], 1)
except ValueError: # no known nodes except ValueError: # no known nodes
knownnodes.knownNodesLock.release()
self.stop.wait(1) self.stop.wait(1)
continue continue
priority = (183600 - (time.time() - knownnodes.knownNodes[self.streamNumber][peer])) / 183600 # 2 days and 3 hours priority = (183600 - (time.time() - knownnodes.knownNodes[self.streamNumber][peer])) / 183600 # 2 days and 3 hours
knownnodes.knownNodesLock.release()
if BMConfigParser().get('bitmessagesettings', 'socksproxytype') != 'none': if BMConfigParser().get('bitmessagesettings', 'socksproxytype') != 'none':
if peer.host.find(".onion") == -1: if peer.host.find(".onion") == -1:
priority /= 10 # hidden services have 10x priority over plain net priority /= 10 # hidden services have 10x priority over plain net
@ -82,7 +79,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
while BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections') and not self._stopped: while BMConfigParser().safeGetBoolean('bitmessagesettings', 'sendoutgoingconnections') and not self._stopped:
self.name = "outgoingSynSender" self.name = "outgoingSynSender"
maximumConnections = 1 if state.trustedPeer else 8 # maximum number of outgoing connections = 8 maximumConnections = 1 if state.trustedPeer else 8 # maximum number of outgoing connections = 8
while len(self.selfInitiatedConnections[self.streamNumber]) >= maximumConnections: while len(self.selfInitiatedConnections[self.streamNumber]) >= maximumConnections and not self._stopped:
self.stop.wait(10) self.stop.wait(10)
if state.shutdown: if state.shutdown:
break break
@ -95,7 +92,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
random.seed() random.seed()
peer = self._getPeer() peer = self._getPeer()
self.stop.wait(1) self.stop.wait(1)
if state.shutdown: if self._stopped:
break break
# Clear out the shared.alreadyAttemptedConnectionsList every half # Clear out the shared.alreadyAttemptedConnectionsList every half
# hour so that this program will again attempt a connection # hour so that this program will again attempt a connection
@ -110,7 +107,7 @@ class outgoingSynSender(threading.Thread, StoppableThread):
shared.alreadyAttemptedConnectionsListLock.release() shared.alreadyAttemptedConnectionsListLock.release()
except threading.ThreadError as e: except threading.ThreadError as e:
pass pass
if state.shutdown: if self._stopped:
break break
self.name = "outgoingSynSender-" + peer.host.replace(":", ".") # log parser field separator self.name = "outgoingSynSender-" + peer.host.replace(":", ".") # log parser field separator
address_family = socket.AF_INET address_family = socket.AF_INET
@ -133,12 +130,11 @@ class outgoingSynSender(threading.Thread, StoppableThread):
So let us remove the offending address from our knownNodes file. So let us remove the offending address from our knownNodes file.
""" """
knownnodes.knownNodesLock.acquire() with knownnodes.knownNodesLock:
try: try:
del knownnodes.knownNodes[self.streamNumber][peer] del knownnodes.knownNodes[self.streamNumber][peer]
except: except:
pass pass
knownnodes.knownNodesLock.release()
logger.debug('deleting ' + str(peer) + ' from knownnodes.knownNodes because it caused a socks.socksocket exception. We must not be 64-bit compatible.') logger.debug('deleting ' + str(peer) + ' from knownnodes.knownNodes because it caused a socks.socksocket exception. We must not be 64-bit compatible.')
continue continue
# This option apparently avoids the TIME_WAIT state so that we # This option apparently avoids the TIME_WAIT state so that we

View File

@ -3,7 +3,6 @@ import shared
import time import time
import sys import sys
import os import os
import pickle
import tr#anslate import tr#anslate
from configparser import BMConfigParser from configparser import BMConfigParser
@ -90,28 +89,23 @@ class singleCleaner(threading.Thread, StoppableThread):
# cleanup old nodes # cleanup old nodes
now = int(time.time()) now = int(time.time())
toDelete = [] toDelete = []
knownnodes.knownNodesLock.acquire() with knownnodes.knownNodesLock:
for stream in knownnodes.knownNodes: for stream in knownnodes.knownNodes:
for node in knownnodes.knownNodes[stream].keys(): for node in knownnodes.knownNodes[stream].keys():
if now - knownnodes.knownNodes[stream][node] > 2419200: # 28 days if now - knownnodes.knownNodes[stream][node] > 2419200: # 28 days
shared.needToWriteKownNodesToDisk = True shared.needToWriteKownNodesToDisk = True
del knownnodes.knownNodes[stream][node] del knownnodes.knownNodes[stream][node]
knownnodes.knownNodesLock.release()
# Let us write out the knowNodes to disk if there is anything new to write out. # Let us write out the knowNodes to disk if there is anything new to write out.
if shared.needToWriteKnownNodesToDisk: if shared.needToWriteKnownNodesToDisk:
knownnodes.knownNodesLock.acquire()
output = open(state.appdata + 'knownnodes.dat', 'wb')
try: try:
pickle.dump(knownnodes.knownNodes, output) knownnodes.saveKnownNodes()
output.close()
except Exception as err: except Exception as err:
if "Errno 28" in str(err): if "Errno 28" in str(err):
logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ') logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ')
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon: if shared.daemon:
os._exit(0) os._exit(0)
knownnodes.knownNodesLock.release()
shared.needToWriteKnownNodesToDisk = False shared.needToWriteKnownNodesToDisk = False
# TODO: cleanup pending upload / download # TODO: cleanup pending upload / download

View File

@ -1,5 +1,14 @@
import pickle
import threading import threading
import state
knownNodesLock = threading.Lock() knownNodesLock = threading.Lock()
knownNodes = {} knownNodes = {}
def saveKnownNodes(dirName=None):
    """Pickle the in-memory knownNodes dict to knownnodes.dat.

    dirName -- directory prefix (including trailing separator) to write
    the file into; defaults to state.appdata when omitted.
    """
    # Resolve the target directory up front so the lock is held only
    # for the actual serialisation.
    targetDir = state.appdata if dirName is None else dirName
    with knownNodesLock:
        with open(targetDir + 'knownnodes.dat', 'wb') as output:
            pickle.dump(knownNodes, output)

View File

@ -1,5 +1,4 @@
import os import os
import pickle
import Queue import Queue
import threading import threading
import time import time
@ -10,7 +9,7 @@ from configparser import BMConfigParser
from debug import logger from debug import logger
from helper_sql import sqlQuery, sqlStoredProcedure from helper_sql import sqlQuery, sqlStoredProcedure
from helper_threading import StoppableThread from helper_threading import StoppableThread
from knownnodes import knownNodes, knownNodesLock from knownnodes import saveKnownNodes
from inventory import Inventory from inventory import Inventory
import protocol import protocol
from queues import addressGeneratorQueue, objectProcessorQueue, parserInputQueue, UISignalQueue, workerQueue from queues import addressGeneratorQueue, objectProcessorQueue, parserInputQueue, UISignalQueue, workerQueue
@ -29,17 +28,11 @@ def doCleanShutdown():
if thread.isAlive() and isinstance(thread, StoppableThread): if thread.isAlive() and isinstance(thread, StoppableThread):
thread.stopThread() thread.stopThread()
knownNodesLock.acquire()
UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...')) UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
output = open(state.appdata + 'knownnodes.dat', 'wb') logger.info('Saving knownNodes list of peers to disk')
logger.info('finished opening knownnodes.dat. Now pickle.dump') saveKnownNodes()
pickle.dump(knownNodes, output) logger.info('Done saving knownNodes list of peers to disk')
logger.info('Completed pickle.dump. Closing output...')
output.close()
knownNodesLock.release()
logger.info('Finished closing knownnodes.dat output file.')
UISignalQueue.put(('updateStatusBar','Done saving the knownNodes list of peers to disk.')) UISignalQueue.put(('updateStatusBar','Done saving the knownNodes list of peers to disk.'))
logger.info('Flushing inventory in memory out to disk...') logger.info('Flushing inventory in memory out to disk...')
UISignalQueue.put(( UISignalQueue.put((
'updateStatusBar', 'updateStatusBar',