diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py index 06d89ba8..27b641a6 100755 --- a/src/bitmessagemain.py +++ b/src/bitmessagemain.py @@ -19,7 +19,8 @@ sys.path.insert(0, app_dir) import depends depends.check_dependencies() -import signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully. +# Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully. +import signal # The next 3 are used for the API from singleinstance import singleinstance import errno @@ -32,7 +33,9 @@ from random import randint import getopt from api import MySimpleXMLRPCRequestHandler, StoppableXMLRPCServer -from helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections +from helper_startup import ( + isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections +) import defaults import shared @@ -73,17 +76,19 @@ def connectToStream(streamNumber): selfInitiatedConnections[streamNumber] = {} if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections(): - # Some XP and Vista systems can only have 10 outgoing connections at a time. + # Some XP and Vista systems can only have 10 outgoing connections + # at a time. state.maximumNumberOfHalfOpenConnections = 9 else: state.maximumNumberOfHalfOpenConnections = 64 try: # don't overload Tor - if BMConfigParser().get('bitmessagesettings', 'socksproxytype') != 'none': + if BMConfigParser().get( + 'bitmessagesettings', 'socksproxytype') != 'none': state.maximumNumberOfHalfOpenConnections = 4 except: pass - + with knownnodes.knownNodesLock: if streamNumber not in knownnodes.knownNodes: knownnodes.knownNodes[streamNumber] = {} @@ -94,6 +99,7 @@ def connectToStream(streamNumber): BMConnectionPool().connectToStream(streamNumber) + def _fixSocket(): if sys.platform.startswith('linux'): socket.SO_BINDTODEVICE = 25 @@ -105,6 +111,7 @@ def _fixSocket(): # socket.inet_ntop but we can make one ourselves using ctypes if not hasattr(socket, 'inet_ntop'): addressToString = ctypes.windll.ws2_32.WSAAddressToStringA + def inet_ntop(family, host): if family == socket.AF_INET: if len(host) != 4: @@ -125,6 +132,7 @@ def _fixSocket(): # Same for inet_pton if not hasattr(socket, 'inet_pton'): stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA + def inet_pton(family, host): buf = "\0" * 28 lengthBuf = pack("I", len(buf)) @@ -148,18 +156,21 @@ def _fixSocket(): if not hasattr(socket, 'IPV6_V6ONLY'): socket.IPV6_V6ONLY = 27 + # This thread, of which there is only one, runs the API. 
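Note on the cap chosen in connectToStream(): it boils down to three cases — stay just under the ~10 half-open-connection limit of some XP/Vista systems, use a larger default elsewhere, and drop to 4 whenever a SOCKS proxy such as Tor is configured. A standalone sketch of that decision, with hypothetical arguments standing in for the platform and config lookups (max_half_open_connections is illustrative, not part of the codebase):

    def max_half_open_connections(limited_platform, socks_proxy_type='none'):
        # Some XP and Vista systems can only have ~10 outgoing connections
        # at a time, so stay just under that; otherwise use a larger default.
        limit = 9 if limited_platform else 64
        # Don't overload Tor (or any other SOCKS proxy) with connection attempts.
        if socks_proxy_type != 'none':
            limit = 4
        return limit

    assert max_half_open_connections(True) == 9
    assert max_half_open_connections(False, 'SOCKS5') == 4
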
class singleAPI(threading.Thread, helper_threading.StoppableThread): def __init__(self): threading.Thread.__init__(self, name="singleAPI") self.initStop() - + def stopThread(self): super(singleAPI, self).stopThread() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: - s.connect((BMConfigParser().get('bitmessagesettings', 'apiinterface'), BMConfigParser().getint( - 'bitmessagesettings', 'apiport'))) + s.connect(( + BMConfigParser().get('bitmessagesettings', 'apiinterface'), + BMConfigParser().getint('bitmessagesettings', 'apiport') + )) s.shutdown(socket.SHUT_RDWR) s.close() except: @@ -175,14 +186,18 @@ class singleAPI(threading.Thread, helper_threading.StoppableThread): try: if attempt > 0: port = randint(32767, 65535) - se = StoppableXMLRPCServer((BMConfigParser().get('bitmessagesettings', 'apiinterface'), port), + se = StoppableXMLRPCServer( + (BMConfigParser().get( + 'bitmessagesettings', 'apiinterface'), + port), MySimpleXMLRPCRequestHandler, True, True) except socket.error as e: if e.errno in (errno.EADDRINUSE, errno.WSAEADDRINUSE): continue else: if attempt > 0: - BMConfigParser().set("bitmessagesettings", "apiport", str(port)) + BMConfigParser().set( + "bitmessagesettings", "apiport", str(port)) BMConfigParser().save() break se.register_introspection_functions() @@ -197,14 +212,17 @@ if shared.useVeryEasyProofOfWorkForTesting: defaults.networkDefaultPayloadLengthExtraBytes = int( defaults.networkDefaultPayloadLengthExtraBytes / 100) + class Main: def start(self): _fixSocket() - daemon = BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon') + daemon = BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'daemon') try: - opts, args = getopt.getopt(sys.argv[1:], "hcd", + opts, args = getopt.getopt( + sys.argv[1:], "hcd", ["help", "curses", "daemon"]) except getopt.GetoptError: @@ -237,40 +255,51 @@ class Main: helper_bootstrap.knownNodes() # Start the address generation thread addressGeneratorThread = addressGenerator() - addressGeneratorThread.daemon = True # close the main program even if there are threads left + # close the main program even if there are threads left + addressGeneratorThread.daemon = True addressGeneratorThread.start() # Start the thread that calculates POWs singleWorkerThread = singleWorker() - singleWorkerThread.daemon = True # close the main program even if there are threads left + # close the main program even if there are threads left + singleWorkerThread.daemon = True singleWorkerThread.start() # Start the SQL thread sqlLookup = sqlThread() - sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully. + # DON'T close the main program even if there are threads left. + # The closeEvent should command this thread to exit gracefully. 
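Note on the port handling in singleAPI.run(): it follows a common pattern — bind to the configured apiinterface/apiport, and on EADDRINUSE retry with a random high port, persisting whichever port finally worked. A minimal, standalone sketch of the same pattern using Python 2's stock SimpleXMLRPCServer (bind_api_server and its arguments are illustrative, not part of the codebase; the real loop also accepts errno.WSAEADDRINUSE on Windows):

    import errno
    import socket
    from random import randint
    from SimpleXMLRPCServer import SimpleXMLRPCServer  # Python 2 module name

    def bind_api_server(interface, preferred_port, attempts=50):
        port = preferred_port
        for attempt in range(attempts):
            if attempt > 0:
                # configured port is busy, fall back to a random high port
                port = randint(32767, 65535)
            try:
                return SimpleXMLRPCServer((interface, port)), port
            except socket.error as e:
                if e.errno != errno.EADDRINUSE:
                    raise
        raise RuntimeError('no free port found for the API server')

The connect-then-shutdown in stopThread() above serves to wake the listening socket so that the serving loop can notice the stop request instead of blocking indefinitely on accept().
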
+ sqlLookup.daemon = False sqlLookup.start() - Inventory() # init - DandelionStems() # init, needs to be early because other thread may access it early + Inventory() # init + # init, needs to be early because other thread may access it early + DandelionStems() # SMTP delivery thread - if daemon and BMConfigParser().safeGet("bitmessagesettings", "smtpdeliver", '') != '': + if daemon and BMConfigParser().safeGet( + "bitmessagesettings", "smtpdeliver", ''): smtpDeliveryThread = smtpDeliver() smtpDeliveryThread.start() # SMTP daemon thread - if daemon and BMConfigParser().safeGetBoolean("bitmessagesettings", "smtpd"): + if daemon and BMConfigParser().safeGetBoolean( + "bitmessagesettings", "smtpd"): smtpServerThread = smtpServer() smtpServerThread.start() # Start the thread that calculates POWs objectProcessorThread = objectProcessor() - objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object. + # DON'T close the main program even the thread remains. + # This thread checks the shutdown variable after processing + # each object. + objectProcessorThread.daemon = False objectProcessorThread.start() # Start the cleanerThread singleCleanerThread = singleCleaner() - singleCleanerThread.daemon = True # close the main program even if there are threads left + # close the main program even if there are threads left + singleCleanerThread.daemon = True singleCleanerThread.start() shared.reloadMyAddressHashes() @@ -288,7 +317,8 @@ class Main: call([apiNotifyPath, "startingUp"]) singleAPIThread = singleAPI() - singleAPIThread.daemon = True # close the main program even if there are threads left + # close the main program even if there are threads left + singleAPIThread.daemon = True singleAPIThread.start() BMConnectionPool() @@ -314,26 +344,39 @@ class Main: connectToStream(1) - if BMConfigParser().safeGetBoolean('bitmessagesettings','upnp'): + if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp'): import upnp upnpThread = upnp.uPnPThread() upnpThread.start() - if daemon == False and BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon') == False: - if state.curses == False: + if daemon is False and \ + BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'daemon') is False: + if state.curses is False: if not depends.check_pyqt(): - print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon') - print('You can also run PyBitmessage with the new curses interface by providing \'-c\' as a commandline argument.') + print( + 'PyBitmessage requires PyQt unless you want' + ' to run it as a daemon and interact with it' + ' using the API. You can download PyQt from ' + 'http://www.riverbankcomputing.com/software/pyqt/download' + ' or by searching Google for \'PyQt Download\'.' + ' If you want to run in daemon mode, see ' + 'https://bitmessage.org/wiki/Daemon' + ) + print( + 'You can also run PyBitmessage with' + ' the new curses interface by providing' + ' \'-c\' as a commandline argument.' 
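Note on the daemon flags set here: they encode the shutdown contract — addressGenerator, singleWorker and singleCleaner may simply be abandoned at exit, while sqlThread and objectProcessor must be asked to finish. A reduced sketch of the stop-event pattern that helper_threading.StoppableThread provides (StoppableWorker is illustrative only):

    import threading

    class StoppableWorker(threading.Thread):
        # minimal stand-in for the StoppableThread mixin used above
        def __init__(self, name):
            threading.Thread.__init__(self, name=name)
            self.stop = threading.Event()

        def stopThread(self):
            self.stop.set()

        def run(self):
            while not self.stop.is_set():
                # do one unit of work, then wait; wait() doubles as an
                # interruptible sleep
                self.stop.wait(1.0)

    worker = StoppableWorker('sqlLookup-sketch')
    worker.daemon = False   # like sqlThread: must be told to exit gracefully
    worker.start()
    worker.stopThread()     # shutdown code signals it instead of relying on daemon=True
    worker.join()

The same interruptible-sleep idiom (self.stop.wait(...)) is what paces singleCleaner's housekeeping loop further down in this diff.
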
+ ) sys.exit() import bitmessageqt bitmessageqt.run() else: - if True: -# if depends.check_curses(): - print('Running with curses') - import bitmessagecurses - bitmessagecurses.runwrapper() + # if depends.check_curses(): + print('Running with curses') + import bitmessagecurses + bitmessagecurses.runwrapper() else: BMConfigParser().remove_option('bitmessagesettings', 'dontconnect') @@ -349,7 +392,7 @@ class Main: # fork not implemented pass else: - shared.thisapp.lock() # relock + shared.thisapp.lock() # relock os.umask(0) try: os.setsid() @@ -363,8 +406,8 @@ class Main: # fork not implemented pass else: - shared.thisapp.lock() # relock - shared.thisapp.lockPid = None # indicate we're the final child + shared.thisapp.lock() # relock + shared.thisapp.lockPid = None # indicate we're the final child sys.stdout.flush() sys.stderr.flush() if not sys.platform.startswith('win'): @@ -396,14 +439,14 @@ All parameters are optional. print('Stopping Bitmessage Deamon.') shutdown.doCleanShutdown() - - #TODO: nice function but no one is using this + # TODO: nice function but no one is using this def getApiAddress(self): - if not BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'): + if not BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'apienabled'): return None address = BMConfigParser().get('bitmessagesettings', 'apiinterface') port = BMConfigParser().getint('bitmessagesettings', 'apiport') - return {'address':address,'port':port} + return {'address': address, 'port': port} def main(): diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index 2e4140b6..2c022199 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -1,13 +1,12 @@ import threading import shared import time -import sys import os -import tr#anslate +import tr from bmconfigparser import BMConfigParser -from helper_sql import * -from helper_threading import * +from helper_sql import sqlQuery, sqlExecute +from helper_threading import StoppableThread from inventory import Inventory from network.connectionpool import BMConnectionPool from network.dandelion import DandelionStems @@ -18,8 +17,8 @@ import protocol import state """ -The singleCleaner class is a timer-driven thread that cleans data structures -to free memory, resends messages when a remote node doesn't respond, and +The singleCleaner class is a timer-driven thread that cleans data structures +to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy. It cleans these data structures in memory: inventory (moves data to the on-disk sql database) @@ -48,9 +47,17 @@ class singleCleaner(threading.Thread, StoppableThread): def run(self): timeWeLastClearedInventoryAndPubkeysTables = 0 try: - shared.maximumLengthOfTimeToBotherResendingMessages = (float(BMConfigParser().get('bitmessagesettings', 'stopresendingafterxdays')) * 24 * 60 * 60) + (float(BMConfigParser().get('bitmessagesettings', 'stopresendingafterxmonths')) * (60 * 60 * 24 *365)/12) + shared.maximumLengthOfTimeToBotherResendingMessages = ( + float(BMConfigParser().get( + 'bitmessagesettings', 'stopresendingafterxdays')) * + 24 * 60 * 60) + ( + float(BMConfigParser().get( + 'bitmessagesettings', 'stopresendingafterxmonths')) * + (60 * 60 * 24 * 365)/12) except: - # Either the user hasn't set stopresendingafterxdays and stopresendingafterxmonths yet or the options are missing from the config file. 
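Note on the resend cutoff computed just above: it combines two config values, stopresendingafterxdays in whole days and stopresendingafterxmonths as twelfths of a 365-day year, both converted to seconds; if either option is missing, the code falls back to float('inf') and never stops resending. The same arithmetic as a small standalone helper (resend_cutoff_seconds is illustrative):

    def resend_cutoff_seconds(days, months):
        # days -> seconds, plus months expressed as 1/12 of a 365-day year
        return days * 24 * 60 * 60 + months * (60 * 60 * 24 * 365) / 12.0

    # one month works out to 2,628,000 seconds (~30.4 days)
    assert int(resend_cutoff_seconds(0, 1)) == 2628000
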
+ # Either the user hasn't set stopresendingafterxdays and + # stopresendingafterxmonths yet or the options are missing + # from the config file. shared.maximumLengthOfTimeToBotherResendingMessages = float('inf') # initial wait @@ -59,18 +66,23 @@ class singleCleaner(threading.Thread, StoppableThread): while state.shutdown == 0: queues.UISignalQueue.put(( - 'updateStatusBar', 'Doing housekeeping (Flushing inventory in memory to disk...)')) + 'updateStatusBar', + 'Doing housekeeping (Flushing inventory in memory to disk...)' + )) Inventory().flush() queues.UISignalQueue.put(('updateStatusBar', '')) - - protocol.broadcastToSendDataQueues(( - 0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes. + + # commands the sendData threads to send out a pong message + # if they haven't sent anything else in the last five minutes. + # The socket timeout-time is 10 minutes. + protocol.broadcastToSendDataQueues((0, 'pong', 'no data')) # If we are running as a daemon then we are going to fill up the UI # queue which will never be handled by a UI. We should clear it to # save memory. if BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon'): queues.UISignalQueue.queue.clear() - if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380: + if timeWeLastClearedInventoryAndPubkeysTables < \ + int(time.time()) - 7380: timeWeLastClearedInventoryAndPubkeysTables = int(time.time()) Inventory().clean() # pubkeys @@ -78,14 +90,20 @@ class singleCleaner(threading.Thread, StoppableThread): '''DELETE FROM pubkeys WHERE time?) ''', int(time.time()), int(time.time()) - shared.maximumLengthOfTimeToBotherResendingMessages) for row in queryreturn: if len(row) < 2: - logger.error('Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row)) + logger.error( + 'Something went wrong in the singleCleaner thread:' + ' a query did not return the requested fields. %r', + repr(row) + ) self.stop.wait(3) break toAddress, ackData, status = row @@ -96,7 +114,7 @@ class singleCleaner(threading.Thread, StoppableThread): # cleanup old nodes now = int(time.time()) - toDelete = [] + # toDelete = [] with knownnodes.knownNodesLock: for stream in knownnodes.knownNodes: for node in knownnodes.knownNodes[stream].keys(): @@ -107,14 +125,24 @@ class singleCleaner(threading.Thread, StoppableThread): except TypeError: print "Error in %s" % (str(node)) - # Let us write out the knowNodes to disk if there is anything new to write out. + # Let us write out the knowNodes to disk + # if there is anything new to write out. if shared.needToWriteKnownNodesToDisk: try: knownnodes.saveKnownNodes() except Exception as err: if "Errno 28" in str(err): - logger.fatal('(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(while receiveDataThread knownnodes.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ') + queues.UISignalQueue.put(( + 'alert', + (tr._translate("MainWindow", "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume' + ' is full. 
Bitmessage will now exit.'), + True) + )) if shared.daemon: os._exit(0) shared.needToWriteKnownNodesToDisk = False @@ -125,7 +153,8 @@ class singleCleaner(threading.Thread, StoppableThread): thread.downloadQueue.clear() # inv/object tracking - for connection in BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values(): + for connection in BMConnectionPool().inboundConnections.values() + \ + BMConnectionPool().outboundConnections.values(): connection.clean() # dandelion fluff trigger by expiration for h, t in DandelionStems().timeouts: @@ -147,25 +176,38 @@ class singleCleaner(threading.Thread, StoppableThread): def resendPubkeyRequest(address): - logger.debug('It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.') + logger.debug( + 'It has been a long time and we haven\'t heard a response to our' + ' getpubkey request. Sending again.' + ) try: - del state.neededPubkeys[ - address] # We need to take this entry out of the neededPubkeys structure because the queues.workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently. + # We need to take this entry out of the neededPubkeys structure + # because the queues.workerQueue checks to see whether the entry + # is already present and will not do the POW and send the message + # because it assumes that it has already done it recently. + del state.neededPubkeys[address] except: pass queues.UISignalQueue.put(( - 'updateStatusBar', 'Doing work necessary to again attempt to request a public key...')) + 'updateStatusBar', + 'Doing work necessary to again attempt to request a public key...')) sqlExecute( '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''', address) queues.workerQueue.put(('sendmessage', '')) + def resendMsg(ackdata): - logger.debug('It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.') + logger.debug( + 'It has been a long time and we haven\'t heard an acknowledgement' + ' to our msg. Sending again.' + ) sqlExecute( '''UPDATE sent SET status='msgqueued' WHERE ackdata=?''', ackdata) queues.workerQueue.put(('sendmessage', '')) queues.UISignalQueue.put(( - 'updateStatusBar', 'Doing work necessary to again attempt to deliver a message...')) + 'updateStatusBar', + 'Doing work necessary to again attempt to deliver a message...' 
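Note on the disk-full branch above: it recognises the condition by searching str(err) for "Errno 28". An equivalent check that avoids string matching would test the exception's errno attribute against errno.ENOSPC; a sketch, assuming the exception raised by saveKnownNodes() carries an errno (is_disk_full is illustrative, not part of the codebase):

    import errno

    def is_disk_full(err):
        # "Errno 28" is ENOSPC, "No space left on device", on POSIX systems
        return getattr(err, 'errno', None) == errno.ENOSPC

    assert is_disk_full(IOError(errno.ENOSPC, 'No space left on device'))
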
+ )) diff --git a/src/helper_generic.py b/src/helper_generic.py index b750e519..173f1ea6 100644 --- a/src/helper_generic.py +++ b/src/helper_generic.py @@ -1,4 +1,3 @@ -import os import socket import sys from binascii import hexlify, unhexlify @@ -11,6 +10,7 @@ from debug import logger import queues import shutdown + def powQueueSize(): curWorkerQueue = queues.workerQueue.qsize() for thread in enumerate(): @@ -21,6 +21,7 @@ def powQueueSize(): pass return curWorkerQueue + def convertIntToString(n): a = __builtins__.hex(n) if a[-1:] == 'L': @@ -30,24 +31,33 @@ def convertIntToString(n): else: return unhexlify('0' + a[2:]) + def convertStringToInt(s): return int(hexlify(s), 16) + def allThreadTraceback(frame): id2name = dict([(th.ident, th.name) for th in enumerate()]) code = [] for threadId, stack in sys._current_frames().items(): - code.append("\n# Thread: %s(%d)" % (id2name.get(threadId,""), threadId)) + code.append("\n# Thread: %s(%d)" % + (id2name.get(threadId, ""), threadId)) for filename, lineno, name, line in traceback.extract_stack(stack): - code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) + code.append('File: "%s", line %d, in %s' % + (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) print "\n".join(code) + def signal_handler(signal, frame): - logger.error("Got signal %i in %s/%s", signal, current_process().name, current_thread().name) + logger.error( + "Got signal %i in %s/%s", + signal, current_process().name, current_thread().name + ) if current_process().name == "RegExParser": - # on Windows this isn't triggered, but it's fine, it has its own process termination thing + # on Windows this isn't triggered, but it's fine, + # it has its own process termination thing raise SystemExit if "PoolWorker" in current_process().name: raise SystemExit @@ -58,10 +68,12 @@ def signal_handler(signal, frame): shutdown.doCleanShutdown() else: allThreadTraceback(frame) - print 'Unfortunately you cannot use Ctrl+C when running the UI because the UI captures the signal.' + print('Unfortunately you cannot use Ctrl+C when running the UI' + ' because the UI captures the signal.') + def isHostInPrivateIPRange(host): - if ":" in host: #IPv6 + if ":" in host: # IPv6 hostAddr = socket.inet_pton(socket.AF_INET6, host) if hostAddr == ('\x00' * 15) + '\x01': return False @@ -84,5 +96,6 @@ def isHostInPrivateIPRange(host): return True return False -def addDataPadding(data, desiredMsgLength = 12, paddingChar = '\x00'): + +def addDataPadding(data, desiredMsgLength=12, paddingChar='\x00'): return data + paddingChar * (desiredMsgLength - len(data)) diff --git a/src/shutdown.py b/src/shutdown.py index a066104c..c2587236 100644 --- a/src/shutdown.py +++ b/src/shutdown.py @@ -12,53 +12,68 @@ from helper_threading import StoppableThread from knownnodes import saveKnownNodes from inventory import Inventory import protocol -from queues import addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue +from queues import ( + addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue) import shared import state + def doCleanShutdown(): - state.shutdown = 1 #Used to tell proof of work worker threads and the objectProcessorThread to exit. - protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data')) + # Used to tell proof of work worker threads and the objectProcessorThread + # to exit. 
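Note on convertIntToString() and convertStringToInt() in helper_generic.py: they are thin wrappers around hex conversion of Python 2 byte strings, and the round trip is easy to check in isolation (string_to_int/int_to_string are illustrative stand-ins):

    from binascii import hexlify, unhexlify

    def string_to_int(s):
        # same idea as convertStringToInt(): read the bytes as a big-endian hex number
        return int(hexlify(s), 16)

    def int_to_string(n):
        # mirror of convertIntToString(): drop '0x'/'L', re-pad to an even digit count
        h = hex(n).rstrip('L')[2:]
        if len(h) % 2:
            h = '0' + h
        return unhexlify(h)

    assert int_to_string(string_to_int('hello')) == 'hello'

As with the originals, leading zero bytes do not survive the round trip.
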
+ state.shutdown = 1 + protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data')) objectProcessorQueue.put(('checkShutdownVariable', 'no data')) for thread in threading.enumerate(): if thread.isAlive() and isinstance(thread, StoppableThread): thread.stopThread() - - UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...')) + + UISignalQueue.put(( + 'updateStatusBar', + 'Saving the knownNodes list of peers to disk...')) logger.info('Saving knownNodes list of peers to disk') saveKnownNodes() logger.info('Done saving knownNodes list of peers to disk') - UISignalQueue.put(('updateStatusBar','Done saving the knownNodes list of peers to disk.')) + UISignalQueue.put(( + 'updateStatusBar', + 'Done saving the knownNodes list of peers to disk.')) logger.info('Flushing inventory in memory out to disk...') UISignalQueue.put(( 'updateStatusBar', - 'Flushing inventory in memory out to disk. This should normally only take a second...')) + 'Flushing inventory in memory out to disk.' + ' This should normally only take a second...')) Inventory().flush() - # Verify that the objectProcessor has finished exiting. It should have incremented the - # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit. + # Verify that the objectProcessor has finished exiting. It should have + # incremented the shutdown variable from 1 to 2. This must finish before + # we command the sqlThread to exit. while state.shutdown == 1: time.sleep(.1) - - # This one last useless query will guarantee that the previous flush committed and that the + + # This one last useless query will guarantee that the previous flush + # committed and that the # objectProcessorThread committed before we close the program. sqlQuery('SELECT address FROM subscriptions') logger.info('Finished flushing inventory.') sqlStoredProcedure('exit') - - # Wait long enough to guarantee that any running proof of work worker threads will check the - # shutdown variable and exit. If the main thread closes before they do then they won't stop. + + # Wait long enough to guarantee that any running proof of work worker + # threads will check the shutdown variable and exit. If the main thread + # closes before they do then they won't stop. time.sleep(.25) for thread in threading.enumerate(): if isinstance(thread, sendDataThread): - thread.sendDataThreadQueue.put((0, 'shutdown','no data')) - if thread is not threading.currentThread() and isinstance(thread, StoppableThread) and not isinstance(thread, outgoingSynSender): + thread.sendDataThreadQueue.put((0, 'shutdown', 'no data')) + if thread is not threading.currentThread() \ + and isinstance(thread, StoppableThread) \ + and not isinstance(thread, outgoingSynSender): logger.debug("Waiting for thread %s", thread.name) thread.join() # flush queued - for queue in (workerQueue, UISignalQueue, addressGeneratorQueue, objectProcessorQueue): + for queue in (workerQueue, UISignalQueue, addressGeneratorQueue, + objectProcessorQueue): while True: try: queue.get(False) diff --git a/src/singleinstance.py b/src/singleinstance.py index 7a025945..a7e4e7ea 100644 --- a/src/singleinstance.py +++ b/src/singleinstance.py @@ -1,30 +1,32 @@ #! /usr/bin/env python import atexit -import errno -from multiprocessing import Process import os import sys import state try: import fcntl # @UnresolvedImport -except: +except ImportError: pass + class singleinstance: """ - Implements a single instance application by creating a lock file at appdata. 
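Note on the tail of doCleanShutdown(): it drains the worker, UI-signal, address-generator and object-processor queues so nothing is left blocked on stale work. The non-blocking drain loop, reduced to a standalone sketch using Python 2's Queue module (drain is illustrative):

    import Queue  # Python 2 name; 'queue' in Python 3

    def drain(q):
        # keep taking items without blocking until the queue reports Empty
        while True:
            try:
                q.get(False)
            except Queue.Empty:
                return

    q = Queue.Queue()
    q.put(('sendmessage', ''))
    drain(q)
    assert q.empty()
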
+ Implements a single instance application by creating a lock file + at appdata. - This is based upon the singleton class from tendo https://github.com/pycontribs/tendo - which is under the Python Software Foundation License version 2 + This is based upon the singleton class from tendo + https://github.com/pycontribs/tendo + which is under the Python Software Foundation License version 2 """ def __init__(self, flavor_id="", daemon=False): self.initialized = False self.counter = 0 self.daemon = daemon self.lockPid = None - self.lockfile = os.path.normpath(os.path.join(state.appdata, 'singleton%s.lock' % flavor_id)) + self.lockfile = os.path.normpath( + os.path.join(state.appdata, 'singleton%s.lock' % flavor_id)) if not self.daemon and not state.curses: # Tells the already running (if any) application to get focus. @@ -41,14 +43,21 @@ class singleinstance: self.lockPid = os.getpid() if sys.platform == 'win32': try: - # file already exists, we try to remove (in case previous execution was interrupted) + # file already exists, we try to remove + # (in case previous execution was interrupted) if os.path.exists(self.lockfile): os.unlink(self.lockfile) - self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC) + self.fd = os.open( + self.lockfile, + os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC + ) except OSError: type, e, tb = sys.exc_info() if e.errno == 13: - print 'Another instance of this application is already running' + print( + 'Another instance of this application' + ' is already running' + ) sys.exit(-1) print(e.errno) raise @@ -59,7 +68,8 @@ class singleinstance: self.fp = open(self.lockfile, 'a+') try: if self.daemon and self.lockPid != os.getpid(): - fcntl.lockf(self.fp, fcntl.LOCK_EX) # wait for parent to finish + # wait for parent to finish + fcntl.lockf(self.fp, fcntl.LOCK_EX) else: fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB) self.lockPid = os.getpid() @@ -88,5 +98,5 @@ class singleinstance: fcntl.lockf(self.fp, fcntl.LOCK_UN) if os.path.isfile(self.lockfile): os.unlink(self.lockfile) - except Exception, e: + except Exception: pass
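Note on the POSIX branch of singleinstance: it rests on fcntl.lockf() over a file in appdata — a non-blocking exclusive lock fails with IOError if another process already holds it, while the daemon path takes a blocking lock so a forked child can wait for its parent to finish. The core of the non-daemon branch as a self-contained sketch (acquire_single_instance_lock and its lockfile argument are illustrative; the real class also handles Windows and the post-fork relock):

    import atexit
    import fcntl
    import os
    import sys

    def acquire_single_instance_lock(lockfile):
        # keep the returned file object alive for the life of the process;
        # the lock is released when it is closed or garbage collected
        fp = open(lockfile, 'a+')
        try:
            fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print('Another instance of this application is already running')
            sys.exit(-1)
        atexit.register(os.unlink, lockfile)
        return fp
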