Fixes and refactoring
- fixes errors introduced in the earlier refactoring
- more variables moved to state.py
- path finding functions moved to paths.py
- remembers IPv6 network unreachable (in the future can be used to skip IPv6 for a while)
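In practice the refactoring is a retargeting of call sites: per-run globals such as appdata and sendDataQueues move from shared to state, the path helpers (codePath, lookupExeFolder, lookupAppdataFolder) move to paths, and wire-level constants and helpers (networkDefaultProofOfWorkNonceTrialsPerByte, networkDefaultPayloadLengthExtraBytes, broadcastToSendDataQueues, isProofOfWorkSufficient) move to protocol. A minimal sketch of the new import layout follows; the function itself is illustrative and not part of the commit, it only strings together symbols the diff below actually touches.

import paths
import protocol
import state


def _example_call_sites():
    # Per-run data now lives in state (was shared.appdata).
    keys_file = state.appdata + 'keys.dat'
    # Path lookups now live in paths (was shared.lookupExeFolder()).
    portable = state.appdata == paths.lookupExeFolder()
    # Network-wide proof-of-work defaults now live in protocol (was shared.*).
    trials = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
    return keys_file, portable, trials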
parent 5d2bebae28
commit ac348e4e6b

src/api.py: 27 changed lines
@@ -24,6 +24,7 @@ import helper_inbox
 import helper_sent
 import hashlib

+import state
 from pyelliptic.openssl import OpenSSL
 from struct import pack

@@ -251,15 +252,15 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 elif len(params) == 3:
 label, eighteenByteRipe, totalDifficulty = params
 nonceTrialsPerByte = int(
-shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
 payloadLengthExtraBytes = BMConfigParser().get(
 'bitmessagesettings', 'defaultpayloadlengthextrabytes')
 elif len(params) == 4:
 label, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
 nonceTrialsPerByte = int(
-shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
 payloadLengthExtraBytes = int(
-shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
+protocol.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
 else:
 raise APIError(0, 'Too many parameters!')
 label = self._decode(label, "base64")

@@ -319,15 +320,15 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 elif len(params) == 6:
 passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty = params
 nonceTrialsPerByte = int(
-shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
 payloadLengthExtraBytes = BMConfigParser().get(
 'bitmessagesettings', 'defaultpayloadlengthextrabytes')
 elif len(params) == 7:
 passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
 nonceTrialsPerByte = int(
-shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
 payloadLengthExtraBytes = int(
-shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
+protocol.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
 else:
 raise APIError(0, 'Too many parameters!')
 if len(passphrase) == 0:

@@ -450,7 +451,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 if not BMConfigParser().safeGetBoolean(address, 'chan'):
 raise APIError(25, 'Specified address is not a chan address. Use deleteAddress API call instead.')
 BMConfigParser().remove_section(address)
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
+with open(state.appdata + 'keys.dat', 'wb') as configfile:
 BMConfigParser().write(configfile)
 return 'success'

@@ -464,7 +465,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 if not BMConfigParser().has_section(address):
 raise APIError(13, 'Could not find this address in your keys.dat file.')
 BMConfigParser().remove_section(address)
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
+with open(state.appdata + 'keys.dat', 'wb') as configfile:
 BMConfigParser().write(configfile)
 shared.UISignalQueue.put(('rerenderMessagelistFromLabels',''))
 shared.UISignalQueue.put(('rerenderMessagelistToLabels',''))

@@ -837,7 +838,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 # Let us do the POW and attach it to the front
 target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
 with shared.printLock:
-print '(For msg message via API) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes
+print '(For msg message via API) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes
 powStartTime = time.time()
 initialHash = hashlib.sha512(encryptedPayload).digest()
 trialValue, nonce = proofofwork.run(target, initialHash)

@@ -856,7 +857,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 objectType, toStreamNumber, encryptedPayload, int(time.time()) + TTL,'')
 with shared.printLock:
 print 'Broadcasting inv for msg(API disseminatePreEncryptedMsg command):', hexlify(inventoryHash)
-shared.broadcastToSendDataQueues((
+protocol.broadcastToSendDataQueues((
 toStreamNumber, 'advertiseobject', inventoryHash))

 def HandleTrashSentMessageByAckDAta(self, params):

@@ -879,8 +880,8 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 payload = self._decode(payload, "hex")

 # Let us do the POW
-target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
-8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+target = 2 ** 64 / ((len(payload) + protocol.networkDefaultPayloadLengthExtraBytes +
+8) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)
 print '(For pubkey message via API) Doing proof of work...'
 initialHash = hashlib.sha512(payload).digest()
 trialValue, nonce = proofofwork.run(target, initialHash)

@@ -903,7 +904,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
 objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL,'')
 with shared.printLock:
 print 'broadcasting inv within API command disseminatePubkey with hash:', hexlify(inventoryHash)
-shared.broadcastToSendDataQueues((
+protocol.broadcastToSendDataQueues((
 streamNumber, 'advertiseobject', inventoryHash))

 def HandleGetMessageDataByDestinationHash(self, params):
@@ -22,6 +22,7 @@ from dialog import Dialog
 from helper_sql import *

 import shared
+import ConfigParser
 from configparser import BMConfigParser
 from addresses import *
 from pyelliptic.openssl import OpenSSL

@@ -28,6 +28,7 @@ from helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenCon

 import shared
 from helper_sql import sqlQuery
+import state
 import threading

 # Classes

@@ -49,7 +50,7 @@ import helper_generic
 from helper_threading import *

 def connectToStream(streamNumber):
-shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'
+state.streamsInWhichIAmParticipating[streamNumber] = 'no data'
 selfInitiatedConnections[streamNumber] = {}

 if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():

@@ -146,10 +147,10 @@ class singleAPI(threading.Thread, StoppableThread):
 selfInitiatedConnections = {}

 if shared.useVeryEasyProofOfWorkForTesting:
-shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(
-shared.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
-shared.networkDefaultPayloadLengthExtraBytes = int(
-shared.networkDefaultPayloadLengthExtraBytes / 100)
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte = int(
+protocol.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
+protocol.networkDefaultPayloadLengthExtraBytes = int(
+protocol.networkDefaultPayloadLengthExtraBytes / 100)

 class Main:
 def start(self, daemon=False):
@@ -76,7 +76,10 @@ from dialogs import AddAddressDialog
 from class_objectHashHolder import objectHashHolder
 from class_singleWorker import singleWorker
 from helper_generic import powQueueSize, invQueueSize
+import paths
 from proofofwork import getPowType
+import protocol
+import state
 from statusbar import BMStatusBar
 from version import softwareVersion

@@ -100,13 +103,13 @@ def change_translation(newlocale):
 pass

 qmytranslator = QtCore.QTranslator()
-translationpath = os.path.join (shared.codePath(), 'translations', 'bitmessage_' + newlocale)
+translationpath = os.path.join (paths.codePath(), 'translations', 'bitmessage_' + newlocale)
 qmytranslator.load(translationpath)
 QtGui.QApplication.installTranslator(qmytranslator)

 qsystranslator = QtCore.QTranslator()
-if shared.frozen:
-translationpath = os.path.join (shared.codePath(), 'translations', 'qt_' + newlocale)
+if paths.frozen:
+translationpath = os.path.join (paths.codePath(), 'translations', 'qt_' + newlocale)
 else:
 translationpath = os.path.join (str(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)), 'qt_' + newlocale)
 qsystranslator.load(translationpath)

@@ -1360,9 +1363,9 @@ class MyForm(settingsmixin.SMainWindow):
 # if the address had a known label in the address book
 if label is not None:
 # Does a sound file exist for this particular contact?
-if (os.path.isfile(shared.appdata + 'sounds/' + label + '.wav') or
-os.path.isfile(shared.appdata + 'sounds/' + label + '.mp3')):
-soundFilename = shared.appdata + 'sounds/' + label
+if (os.path.isfile(state.appdata + 'sounds/' + label + '.wav') or
+os.path.isfile(state.appdata + 'sounds/' + label + '.mp3')):
+soundFilename = state.appdata + 'sounds/' + label

 # Avoid making sounds more frequently than the threshold.
 # This suppresses playing sounds repeatedly when there

@@ -1378,19 +1381,19 @@
 if soundFilename is None:
 # the sound is for an address which exists in the address book
 if category is self.SOUND_KNOWN:
-soundFilename = shared.appdata + 'sounds/known'
+soundFilename = state.appdata + 'sounds/known'
 # the sound is for an unknown address
 elif category is self.SOUND_UNKNOWN:
-soundFilename = shared.appdata + 'sounds/unknown'
+soundFilename = state.appdata + 'sounds/unknown'
 # initial connection sound
 elif category is self.SOUND_CONNECTED:
-soundFilename = shared.appdata + 'sounds/connected'
+soundFilename = state.appdata + 'sounds/connected'
 # disconnected sound
 elif category is self.SOUND_DISCONNECTED:
-soundFilename = shared.appdata + 'sounds/disconnected'
+soundFilename = state.appdata + 'sounds/disconnected'
 # sound when the connection status becomes green
 elif category is self.SOUND_CONNECTION_GREEN:
-soundFilename = shared.appdata + 'sounds/green'
+soundFilename = state.appdata + 'sounds/green'

 if soundFilename is not None and play is True:
 if not self.isConnectionSound(category):

@@ -1526,7 +1529,7 @@ class MyForm(settingsmixin.SMainWindow):
 # menu button 'manage keys'
 def click_actionManageKeys(self):
 if 'darwin' in sys.platform or 'linux' in sys.platform:
-if shared.appdata == '':
+if state.appdata == '':
 # reply = QtGui.QMessageBox.information(self, 'keys.dat?','You
 # may manage your keys by editing the keys.dat file stored in
 # the same directory as this program. It is important that you

@@ -1536,14 +1539,14 @@

 else:
 QtGui.QMessageBox.information(self, 'keys.dat?', _translate(
-"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file.").arg(shared.appdata), QMessageBox.Ok)
+"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file.").arg(state.appdata), QMessageBox.Ok)
 elif sys.platform == 'win32' or sys.platform == 'win64':
-if shared.appdata == '':
+if state.appdata == '':
 reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
 "MainWindow", "You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)"), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
 else:
 reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
-"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)").arg(shared.appdata), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
+"MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)").arg(state.appdata), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
 if reply == QtGui.QMessageBox.Yes:
 shared.openKeysFile()

@@ -2409,10 +2412,10 @@ class MyForm(settingsmixin.SMainWindow):
 # Demanded difficulty tab
 if float(self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) >= 1:
 BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(int(float(
-self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
 if float(self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) >= 1:
 BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(int(float(
-self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)))

 if self.settingsDialogInstance.ui.comboBoxOpenCL.currentText().toUtf8() != BMConfigParser().safeGet("bitmessagesettings", "opencl"):
 BMConfigParser().set('bitmessagesettings', 'opencl', str(self.settingsDialogInstance.ui.comboBoxOpenCL.currentText()))

@@ -2421,18 +2424,18 @@

 if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) == 0:
 if BMConfigParser().get('bitmessagesettings','maxacceptablenoncetrialsperbyte') != str(int(float(
-self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)):
+self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)):
 # the user changed the max acceptable total difficulty
 acceptableDifficultyChanged = True
 BMConfigParser().set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', str(int(float(
-self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
 if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) == 0:
 if BMConfigParser().get('bitmessagesettings','maxacceptablepayloadlengthextrabytes') != str(int(float(
-self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)):
+self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)):
 # the user changed the max acceptable small message difficulty
 acceptableDifficultyChanged = True
 BMConfigParser().set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', str(int(float(
-self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)))
 if acceptableDifficultyChanged:
 # It might now be possible to send msgs which were previously marked as toodifficult.
 # Let us change them to 'msgqueued'. The singleWorker will try to send them and will again

@@ -2493,21 +2496,21 @@ class MyForm(settingsmixin.SMainWindow):
 # startup for linux
 pass

-if shared.appdata != shared.lookupExeFolder() and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we are NOT using portable mode now but the user selected that we should...
+if state.appdata != paths.lookupExeFolder() and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we are NOT using portable mode now but the user selected that we should...
 # Write the keys.dat file to disk in the new location
 sqlStoredProcedure('movemessagstoprog')
-with open(shared.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
+with open(paths.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
 BMConfigParser().write(configfile)
 # Write the knownnodes.dat file to disk in the new location
 shared.knownNodesLock.acquire()
-output = open(shared.lookupExeFolder() + 'knownnodes.dat', 'wb')
+output = open(paths.lookupExeFolder() + 'knownnodes.dat', 'wb')
 pickle.dump(shared.knownNodes, output)
 output.close()
 shared.knownNodesLock.release()
-os.remove(shared.appdata + 'keys.dat')
-os.remove(shared.appdata + 'knownnodes.dat')
-previousAppdataLocation = shared.appdata
-shared.appdata = shared.lookupExeFolder()
+os.remove(state.appdata + 'keys.dat')
+os.remove(state.appdata + 'knownnodes.dat')
+previousAppdataLocation = state.appdata
+state.appdata = paths.lookupExeFolder()
 debug.restartLoggingInUpdatedAppdataLocation()
 try:
 os.remove(previousAppdataLocation + 'debug.log')

@@ -2515,25 +2518,25 @@
 except:
 pass

-if shared.appdata == shared.lookupExeFolder() and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we ARE using portable mode now but the user selected that we shouldn't...
-shared.appdata = shared.lookupAppdataFolder()
-if not os.path.exists(shared.appdata):
-os.makedirs(shared.appdata)
+if state.appdata == paths.lookupExeFolder() and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we ARE using portable mode now but the user selected that we shouldn't...
+state.appdata = paths.lookupAppdataFolder()
+if not os.path.exists(state.appdata):
+os.makedirs(state.appdata)
 sqlStoredProcedure('movemessagstoappdata')
 # Write the keys.dat file to disk in the new location
 shared.writeKeysFile()
 # Write the knownnodes.dat file to disk in the new location
 shared.knownNodesLock.acquire()
-output = open(shared.appdata + 'knownnodes.dat', 'wb')
+output = open(state.appdata + 'knownnodes.dat', 'wb')
 pickle.dump(shared.knownNodes, output)
 output.close()
 shared.knownNodesLock.release()
-os.remove(shared.lookupExeFolder() + 'keys.dat')
-os.remove(shared.lookupExeFolder() + 'knownnodes.dat')
+os.remove(paths.lookupExeFolder() + 'keys.dat')
+os.remove(paths.lookupExeFolder() + 'knownnodes.dat')
 debug.restartLoggingInUpdatedAppdataLocation()
 try:
-os.remove(shared.lookupExeFolder() + 'debug.log')
-os.remove(shared.lookupExeFolder() + 'debug.log.1')
+os.remove(paths.lookupExeFolder() + 'debug.log')
+os.remove(paths.lookupExeFolder() + 'debug.log.1')
 except:
 pass

@@ -3621,8 +3624,8 @@ class MyForm(settingsmixin.SMainWindow):
 currentRow, 0).setIcon(avatarize(addressAtCurrentRow))

 def setAvatar(self, addressAtCurrentRow):
-if not os.path.exists(shared.appdata + 'avatars/'):
-os.makedirs(shared.appdata + 'avatars/')
+if not os.path.exists(state.appdata + 'avatars/'):
+os.makedirs(state.appdata + 'avatars/')
 hash = hashlib.md5(addBMIfNotPresent(addressAtCurrentRow)).hexdigest()
 extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
 # http://pyqt.sourceforge.net/Docs/PyQt4/qimagereader.html#supportedImageFormats

@@ -3633,8 +3636,8 @@
 for ext in extensions:
 filters += [ names[ext] + ' (*.' + ext.lower() + ')' ]
 all_images_filter += [ '*.' + ext.lower() ]
-upper = shared.appdata + 'avatars/' + hash + '.' + ext.upper()
-lower = shared.appdata + 'avatars/' + hash + '.' + ext.lower()
+upper = state.appdata + 'avatars/' + hash + '.' + ext.upper()
+lower = state.appdata + 'avatars/' + hash + '.' + ext.lower()
 if os.path.isfile(lower):
 current_files += [lower]
 elif os.path.isfile(upper):

@@ -3643,7 +3646,7 @@
 filters[1:1] = ['All files (*.*)']
 sourcefile = QFileDialog.getOpenFileName(self, _translate("MainWindow","Set avatar..."), filter = ';;'.join(filters))
 # determine the correct filename (note that avatars don't use the suffix)
-destination = shared.appdata + 'avatars/' + hash + '.' + sourcefile.split('.')[-1]
+destination = state.appdata + 'avatars/' + hash + '.' + sourcefile.split('.')[-1]
 exists = QtCore.QFile.exists(destination)
 if sourcefile == '':
 # ask for removal of avatar

@@ -4021,12 +4024,12 @@ class settingsDialog(QtGui.QDialog):
 self.ui.checkBoxReplyBelow.setChecked(
 BMConfigParser().safeGetBoolean('bitmessagesettings', 'replybelow'))

-if shared.appdata == shared.lookupExeFolder():
+if state.appdata == paths.lookupExeFolder():
 self.ui.checkBoxPortableMode.setChecked(True)
 else:
 try:
 import tempfile
-file = tempfile.NamedTemporaryFile(dir=shared.lookupExeFolder(), delete=True)
+file = tempfile.NamedTemporaryFile(dir=paths.lookupExeFolder(), delete=True)
 file.close # should autodelete
 except:
 self.ui.checkBoxPortableMode.setDisabled(True)

@@ -4086,15 +4089,15 @@

 # Demanded difficulty tab
 self.ui.lineEditTotalDifficulty.setText(str((float(BMConfigParser().getint(
-'bitmessagesettings', 'defaultnoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+'bitmessagesettings', 'defaultnoncetrialsperbyte')) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
 self.ui.lineEditSmallMessageDifficulty.setText(str((float(BMConfigParser().getint(
-'bitmessagesettings', 'defaultpayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
+'bitmessagesettings', 'defaultpayloadlengthextrabytes')) / protocol.networkDefaultPayloadLengthExtraBytes)))

 # Max acceptable difficulty tab
 self.ui.lineEditMaxAcceptableTotalDifficulty.setText(str((float(BMConfigParser().getint(
-'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
 self.ui.lineEditMaxAcceptableSmallMessageDifficulty.setText(str((float(BMConfigParser().getint(
-'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
+'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / protocol.networkDefaultPayloadLengthExtraBytes)))

 # OpenCL
 if openclpow.openclAvailable():
@@ -15,7 +15,6 @@ from messagecompose import MessageCompose
 import settingsmixin
 from networkstatus import NetworkStatus
 from blacklist import Blacklist
-import shared

 try:
 _fromUtf8 = QtCore.QString.fromUtf8
@@ -3,7 +3,7 @@ import os
 from PyQt4 import QtCore, QtGui

 from configparser import BMConfigParser
-from shared import codePath
+import paths

 class LanguageBox(QtGui.QComboBox):
 languageName = {"system": "System Settings", "eo": "Esperanto", "en_pirate": "Pirate English"}

@@ -14,7 +14,7 @@ class LanguageBox(QtGui.QComboBox):
 def populate(self):
 self.languages = []
 self.clear()
-localesPath = os.path.join (codePath(), 'translations')
+localesPath = os.path.join (paths.codePath(), 'translations')
 configuredLocale = "system"
 try:
 configuredLocale = BMConfigParser().get('bitmessagesettings', 'userlocale', "system")
@@ -11,9 +11,11 @@ from foldertree import AccountMixin
 from helper_sql import *
 from l10n import getTranslationLanguage
 from openclpow import openclAvailable, openclEnabled
+import paths
 from proofofwork import bmpow
 from pyelliptic.openssl import OpenSSL
 import shared
+import state
 from version import softwareVersion

 # this is BM support address going to Peter Surda

@@ -63,7 +65,7 @@ def checkHasNormalAddress():

 def createAddressIfNeeded(myapp):
 if not checkHasNormalAddress():
-shared.addressGeneratorQueue.put(('createRandomAddress', 4, 1, str(QtGui.QApplication.translate("Support", SUPPORT_MY_LABEL)), 1, "", False, shared.networkDefaultProofOfWorkNonceTrialsPerByte, shared.networkDefaultPayloadLengthExtraBytes))
+shared.addressGeneratorQueue.put(('createRandomAddress', 4, 1, str(QtGui.QApplication.translate("Support", SUPPORT_MY_LABEL)), 1, "", False, protocol.networkDefaultProofOfWorkNonceTrialsPerByte, protocol.networkDefaultPayloadLengthExtraBytes))
 while shared.shutdown == 0 and not checkHasNormalAddress():
 time.sleep(.2)
 myapp.rerenderComboBoxSendFrom()

@@ -104,9 +106,9 @@ def createSupportMessage(myapp):
 opensslversion = "%s (Python internal), %s (external for PyElliptic)" % (ssl.OPENSSL_VERSION, OpenSSL._lib.SSLeay_version(SSLEAY_VERSION))

 frozen = "N/A"
-if shared.frozen:
-frozen = shared.frozen
-portablemode = "True" if shared.appdata == shared.lookupExeFolder() else "False"
+if paths.frozen:
+frozen = paths.frozen
+portablemode = "True" if state.appdata == paths.lookupExeFolder() else "False"
 cpow = "True" if bmpow else "False"
 #cpow = QtGui.QApplication.translate("Support", cpow)
 openclpow = str(BMConfigParser().safeGet('bitmessagesettings', 'opencl')) if openclEnabled() else "None"
@@ -4,6 +4,7 @@ import os
 import shared
 from addresses import addBMIfNotPresent
 from configparser import BMConfigParser
+import state

 str_broadcast_subscribers = '[Broadcast subscribers]'
 str_chan = '[chan]'

@@ -82,8 +83,8 @@ def avatarize(address):
 extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
 # try to find a specific avatar
 for ext in extensions:
-lower_hash = shared.appdata + 'avatars/' + hash + '.' + ext.lower()
-upper_hash = shared.appdata + 'avatars/' + hash + '.' + ext.upper()
+lower_hash = state.appdata + 'avatars/' + hash + '.' + ext.lower()
+upper_hash = state.appdata + 'avatars/' + hash + '.' + ext.upper()
 if os.path.isfile(lower_hash):
 # print 'found avatar of ', address
 idcon.addFile(lower_hash)

@@ -94,8 +95,8 @@
 return idcon
 # if we haven't found any, try to find a default avatar
 for ext in extensions:
-lower_default = shared.appdata + 'avatars/' + 'default.' + ext.lower()
-upper_default = shared.appdata + 'avatars/' + 'default.' + ext.upper()
+lower_default = state.appdata + 'avatars/' + 'default.' + ext.lower()
+upper_default = state.appdata + 'avatars/' + 'default.' + ext.upper()
 if os.path.isfile(lower_default):
 default = lower_default
 idcon.addFile(lower_default)
@@ -1,10 +1,10 @@
 from PyQt4 import uic
 import os.path
+import paths
 import sys
-from shared import codePath

 def resource_path(resFile):
-baseDir = codePath()
+baseDir = paths.codePath()
 for subDir in ["ui", "bitmessageqt"]:
 if os.path.isdir(os.path.join(baseDir, subDir)) and os.path.isfile(os.path.join(baseDir, subDir, resFile)):
 return os.path.join(baseDir, subDir, resFile)
@@ -77,13 +77,13 @@ class addressGenerator(threading.Thread, StoppableThread):
 if nonceTrialsPerByte == 0:
 nonceTrialsPerByte = BMConfigParser().getint(
 'bitmessagesettings', 'defaultnoncetrialsperbyte')
-if nonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte:
-nonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
+if nonceTrialsPerByte < protocol.networkDefaultProofOfWorkNonceTrialsPerByte:
+nonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
 if payloadLengthExtraBytes == 0:
 payloadLengthExtraBytes = BMConfigParser().getint(
 'bitmessagesettings', 'defaultpayloadlengthextrabytes')
-if payloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
-payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+if payloadLengthExtraBytes < protocol.networkDefaultPayloadLengthExtraBytes:
+payloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
 if command == 'createRandomAddress':
 shared.UISignalQueue.put((
 'updateStatusBar', tr._translate("MainWindow", "Generating one new address")))
@@ -467,7 +467,7 @@ class objectProcessor(threading.Thread):
 toAddress, 'noncetrialsperbyte')
 requiredPayloadLengthExtraBytes = BMConfigParser().getint(
 toAddress, 'payloadlengthextrabytes')
-if not shared.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
+if not protocol.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
 logger.info('Proof of work in msg is insufficient only because it does not meet our higher requirement.')
 return
 blockMessage = False # Gets set to True if the user shouldn't see the message according to black or white lists.
@@ -1,3 +1,4 @@
+import errno
 import threading
 import time
 import random

@@ -12,6 +13,7 @@ from class_sendDataThread import *
 from class_receiveDataThread import *
 from configparser import BMConfigParser
 from helper_threading import *
+import state

 # For each stream to which we connect, several outgoingSynSender threads
 # will exist and will collectively create 8 connections with peers.

@@ -252,12 +254,16 @@ class outgoingSynSender(threading.Thread, StoppableThread):
 logger.debug('SOCKS5 error: %s', str(err))
 else:
 logger.error('SOCKS5 error: %s', str(err))
+if err[0][0] == 4 or err[0][0] == 2:
+state.networkProtocolLastFailed['IPv6'] = time.time()
 except socks.Socks4Error as err:
 logger.error('Socks4Error: ' + str(err))
 except socket.error as err:
 if BMConfigParser().get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
 logger.error('Bitmessage MIGHT be having trouble connecting to the SOCKS server. ' + str(err))
 else:
+if ":" in peer.host and err[0] == errno.ENETUNREACH:
+state.networkProtocolLastFailed['IPv6'] = time.time()
 if shared.verbose >= 1:
 logger.debug('Could NOT connect to ' + str(peer) + 'during outgoing attempt. ' + str(err))

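The hunk above carries the last commit-message bullet: a SOCKS5 reply of 2 (network unreachable) or 4 (host unreachable), or a plain socket.error of errno.ENETUNREACH against a host containing ':' (IPv6), now stamps state.networkProtocolLastFailed['IPv6'] with the current time. The commit only records the timestamp; a guard that actually skips IPv6 for a while could later look like the sketch below, where the helper name and the 300-second back-off are assumptions rather than part of this commit.

import time

import state


def ipv6_recently_unreachable(backoff_seconds=300):
    # Hypothetical consumer of the timestamp written above by
    # outgoingSynSender: True while the last IPv6 network-unreachable
    # failure is fresher than the back-off window, so peer selection
    # could skip IPv6 candidates for a while.
    last_failed = state.networkProtocolLastFailed.get('IPv6')
    return last_failed is not None and time.time() - last_failed < backoff_seconds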
@@ -28,8 +28,10 @@ from class_objectHashHolder import objectHashHolder
 from helper_generic import addDataPadding, isHostInPrivateIPRange
 from helper_sql import sqlQuery
 from debug import logger
+import paths
 import protocol
 from inventory import Inventory
+import state
 import tr
 from version import softwareVersion

@@ -291,7 +293,7 @@
 if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
 protocol.haveSSL(not self.initiatedConnection)):
 logger.debug("Initialising TLS")
-self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
+self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
 if hasattr(self.sslSock, "context"):
 self.sslSock.context.set_ecdh_curve("secp256k1")
 while True:

@@ -320,12 +322,12 @@
 shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
 logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
 'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
-'The length of sendDataQueues is now: ' + str(len(shared.sendDataQueues)) + "\n" + \
+'The length of sendDataQueues is now: ' + str(len(state.sendDataQueues)) + "\n" + \
 'broadcasting addr from within connectionFullyEstablished function.')

 # Let all of our peers know about this new node.
 dataToSend = (int(time.time()), self.streamNumber, 1, self.peer.host, self.remoteNodeIncomingPort)
-shared.broadcastToSendDataQueues((
+protocol.broadcastToSendDataQueues((
 self.streamNumber, 'advertisepeer', dataToSend))

 self.sendaddr() # This is one large addr message to this one peer.

@@ -594,7 +596,7 @@
 hostDetails = (
 timeSomeoneElseReceivedMessageFromThisNode,
 recaddrStream, recaddrServices, hostStandardFormat, recaddrPort)
-shared.broadcastToSendDataQueues((
+protocol.broadcastToSendDataQueues((
 self.streamNumber, 'advertisepeer', hostDetails))
 else:
 timeLastReceivedMessageFromThisNode = shared.knownNodes[recaddrStream][
@@ -14,6 +14,7 @@ from class_objectHashHolder import *
 from addresses import *
 from debug import logger
 import protocol
+import state

 # Every connection to a peer has a sendDataThread (and also a
 # receiveDataThread).

@@ -22,7 +23,7 @@ class sendDataThread(threading.Thread):
 def __init__(self, sendDataThreadQueue):
 threading.Thread.__init__(self, name="sendData")
 self.sendDataThreadQueue = sendDataThreadQueue
-shared.sendDataQueues.append(self.sendDataThreadQueue)
+state.sendDataQueues.append(self.sendDataThreadQueue)
 self.data = ''
 self.objectHashHolderInstance = objectHashHolder(self.sendDataThreadQueue)
 self.objectHashHolderInstance.start()

@@ -102,7 +103,7 @@


 def run(self):
-logger.debug('sendDataThread starting. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(shared.sendDataQueues)))
+logger.debug('sendDataThread starting. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(state.sendDataQueues)))
 while True:
 deststream, command, data = self.sendDataThreadQueue.get()

@@ -190,6 +191,6 @@
 self.sock.close()
 except:
 pass
-shared.sendDataQueues.remove(self.sendDataThreadQueue)
-logger.info('sendDataThread ending. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(shared.sendDataQueues)))
+state.sendDataQueues.remove(self.sendDataThreadQueue)
+logger.info('sendDataThread ending. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(state.sendDataQueues)))
 self.objectHashHolderInstance.close()
@@ -11,7 +11,8 @@ from helper_sql import *
 from helper_threading import *
 from inventory import Inventory
 from debug import logger
-from state import neededPubkeys
+import protocol
+import state

 """
 The singleCleaner class is a timer-driven thread that cleans data structures

@@ -53,7 +54,7 @@ class singleCleaner(threading.Thread, StoppableThread):
 Inventory().flush()
 shared.UISignalQueue.put(('updateStatusBar', ''))

-shared.broadcastToSendDataQueues((
+protocol.broadcastToSendDataQueues((
 0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
 # If we are running as a daemon then we are going to fill up the UI
 # queue which will never be handled by a UI. We should clear it to

@@ -98,7 +99,7 @@
 # Let us write out the knowNodes to disk if there is anything new to write out.
 if shared.needToWriteKnownNodesToDisk:
 shared.knownNodesLock.acquire()
-output = open(shared.appdata + 'knownnodes.dat', 'wb')
+output = open(state.appdata + 'knownnodes.dat', 'wb')
 try:
 pickle.dump(shared.knownNodes, output)
 output.close()

@@ -116,7 +117,7 @@
 def resendPubkeyRequest(address):
 logger.debug('It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.')
 try:
-del neededPubkeys[
+del state.neededPubkeys[
 address] # We need to take this entry out of the neededPubkeys structure because the shared.workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
 except:
 pass
@@ -165,7 +165,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += pubEncryptionKey[1:]

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)
@@ -255,7 +255,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += signature

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)
@@ -345,7 +345,7 @@ class singleWorker(threading.Thread, StoppableThread):
             dataToEncrypt, hexlify(pubEncryptionKey))

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)
@@ -466,7 +466,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += highlevelcrypto.encrypt(
             dataToEncrypt, hexlify(pubEncryptionKey))

-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For broadcast message) Doing proof of work...')
         shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
             ackdata, tr._translate("MainWindow", "Doing work necessary to send broadcast..."))))
@@ -659,8 +659,8 @@ class singleWorker(threading.Thread, StoppableThread):

            # Let us fetch the amount of work required by the recipient.
            if toAddressVersionNumber == 2:
-               requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-               requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+               requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+               requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
                    ackdata, tr._translate("MainWindow", "Doing work necessary to send message.\nThere is no required difficulty for version 2 addresses like this."))))
            elif toAddressVersionNumber >= 3:
@@ -670,13 +670,13 @@ class singleWorker(threading.Thread, StoppableThread):
                requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
                    pubkeyPayload[readPosition:readPosition + 10])
                readPosition += varintLength
-               if requiredAverageProofOfWorkNonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte: # We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
-                   requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-               if requiredPayloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
-                   requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+               if requiredAverageProofOfWorkNonceTrialsPerByte < protocol.networkDefaultProofOfWorkNonceTrialsPerByte: # We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
+                   requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+               if requiredPayloadLengthExtraBytes < protocol.networkDefaultPayloadLengthExtraBytes:
+                   requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                logger.debug('Using averageProofOfWorkNonceTrialsPerByte: %s and payloadLengthExtraBytes: %s.' % (requiredAverageProofOfWorkNonceTrialsPerByte, requiredPayloadLengthExtraBytes))
                shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Doing work necessary to send message.\nReceiver\'s required difficulty: %1 and %2").arg(str(float(
-                   requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)))))
+                   requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)))))
                if status != 'forcepow':
                    if (requiredAverageProofOfWorkNonceTrialsPerByte > BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') and BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') != 0) or (requiredPayloadLengthExtraBytes > BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') and BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') != 0):
                        # The demanded difficulty is more than we are willing
@@ -684,8 +684,8 @@ class singleWorker(threading.Thread, StoppableThread):
                        sqlExecute(
                            '''UPDATE sent SET status='toodifficult' WHERE ackdata=? ''',
                            ackdata)
-                       shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do. %3").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
-                           requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)).arg(l10n.formatTimestamp()))))
+                       shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do. %3").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
+                           requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)).arg(l10n.formatTimestamp()))))
                        continue
            else: # if we are sending a message to ourselves or a chan..
                logger.info('Sending a message.')
@@ -703,8 +703,8 @@ class singleWorker(threading.Thread, StoppableThread):
                    privEncryptionKeyBase58))
                pubEncryptionKeyBase256 = unhexlify(highlevelcrypto.privToPub(
                    privEncryptionKeyHex))[1:]
-               requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-               requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+               requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+               requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
                    ackdata, tr._translate("MainWindow", "Doing work necessary to send message."))))

@@ -747,9 +747,9 @@ class singleWorker(threading.Thread, StoppableThread):
            # the receiver is in any of those lists.
            if shared.isAddressInMyAddressBookSubscriptionsListOrWhitelist(toaddress):
                payload += encodeVarint(
-                   shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+                   protocol.networkDefaultProofOfWorkNonceTrialsPerByte)
                payload += encodeVarint(
-                   shared.networkDefaultPayloadLengthExtraBytes)
+                   protocol.networkDefaultPayloadLengthExtraBytes)
            else:
                payload += encodeVarint(BMConfigParser().getint(
                    fromaddress, 'noncetrialsperbyte'))
@@ -790,7 +790,7 @@ class singleWorker(threading.Thread, StoppableThread):
            encryptedPayload += encodeVarint(1) # msg version
            encryptedPayload += encodeVarint(toStreamNumber) + encrypted
            target = 2 ** 64 / (requiredAverageProofOfWorkNonceTrialsPerByte*(len(encryptedPayload) + 8 + requiredPayloadLengthExtraBytes + ((TTL*(len(encryptedPayload)+8+requiredPayloadLengthExtraBytes))/(2 ** 16))))
-           logger.info('(For msg message) Doing proof of work. Total required difficulty: %f. Required small message difficulty: %f.', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)
+           logger.info('(For msg message) Doing proof of work. Total required difficulty: %f. Required small message difficulty: %f.', float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte, float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)

            powStartTime = time.time()
            initialHash = hashlib.sha512(encryptedPayload).digest()
@@ -913,7 +913,7 @@ class singleWorker(threading.Thread, StoppableThread):
        shared.UISignalQueue.put(('updateSentItemStatusByToAddress', (
            toAddress, tr._translate("MainWindow",'Doing work necessary to request encryption key.'))))

-       target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+       target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
        initialHash = hashlib.sha512(payload).digest()
        trialValue, nonce = proofofwork.run(target, initialHash)
        logger.info('Found proof of work ' + str(trialValue) + ' Nonce: ' + str(nonce))
@@ -966,7 +966,7 @@ class singleWorker(threading.Thread, StoppableThread):
        payload += encodeVarint(1) # msg version
        payload += encodeVarint(toStreamNumber) + ackdata

-       target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+       target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
        logger.info('(For ack message) Doing proof of work. TTL set to ' + str(TTL))

        powStartTime = time.time()
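The same proof-of-work target expression recurs in every hunk above; only the module that holds the two network defaults changes from shared to protocol. For reference, the target can be evaluated on its own. A minimal sketch, assuming the default 1000/1000 difficulty parameters, a hypothetical 1000-byte payload and a 4-day TTL:

# Sketch of the PoW target formula used above (payload size and TTL are illustrative).
networkDefaultProofOfWorkNonceTrialsPerByte = 1000
networkDefaultPayloadLengthExtraBytes = 1000

payloadLength = 1000          # hypothetical payload size in bytes, excluding the nonce
TTL = 4 * 24 * 60 * 60        # hypothetical time-to-live in seconds

lengthTerm = payloadLength + 8 + networkDefaultPayloadLengthExtraBytes
target = 2 ** 64 / (networkDefaultProofOfWorkNonceTrialsPerByte *
                    (lengthTerm + (TTL * lengthTerm) / (2 ** 16)))

# proofofwork.run() searches for a nonce whose double-SHA512 trial value is <= target.
print target  # about 1.46 * 10**12 for these inputs

The longer the payload and the longer the TTL, the smaller the target, so a valid nonce takes proportionally more hashing to find.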
@@ -8,7 +8,9 @@ import sys
 import os
 from debug import logger
 from namecoin import ensureNamecoinOptions
+import paths
 import random
+import state
 import string
 import tr#anslate

@@ -23,7 +25,7 @@ class sqlThread(threading.Thread):
         threading.Thread.__init__(self, name="SQL")

     def run(self):
-        self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
+        self.conn = sqlite3.connect(state.appdata + 'messages.dat')
         self.conn.text_factory = str
         self.cur = self.conn.cursor()

@@ -112,9 +114,9 @@ class sqlThread(threading.Thread):

        if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 4:
            BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
-               shared.networkDefaultProofOfWorkNonceTrialsPerByte))
+               protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
            BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
-               shared.networkDefaultPayloadLengthExtraBytes))
+               protocol.networkDefaultPayloadLengthExtraBytes))
            BMConfigParser().set('bitmessagesettings', 'settingsversion', '5')

        if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 5:
@@ -235,8 +237,8 @@ class sqlThread(threading.Thread):
        # Raise the default required difficulty from 1 to 2
        # With the change to protocol v3, this is obsolete.
        if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 6:
-           """if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == shared.networkDefaultProofOfWorkNonceTrialsPerByte:
-               shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
+           """if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == protocol.networkDefaultProofOfWorkNonceTrialsPerByte:
+               shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(protocol.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
            """
            BMConfigParser().set('bitmessagesettings', 'settingsversion', '7')

@@ -302,8 +304,8 @@ class sqlThread(threading.Thread):

        # With the change to protocol version 3, reset the user-settable difficulties to 1
        if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 8:
-           BMConfigParser().set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte))
-           BMConfigParser().set('bitmessagesettings','defaultpayloadlengthextrabytes', str(shared.networkDefaultPayloadLengthExtraBytes))
+           BMConfigParser().set('bitmessagesettings','defaultnoncetrialsperbyte', str(protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
+           BMConfigParser().set('bitmessagesettings','defaultpayloadlengthextrabytes', str(protocol.networkDefaultPayloadLengthExtraBytes))
            previousTotalDifficulty = int(BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / 320
            previousSmallMessageDifficulty = int(BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / 14000
            BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(previousTotalDifficulty * 1000))
@@ -331,9 +333,9 @@ class sqlThread(threading.Thread):

        # sanity check
        if BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') == 0:
-           BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(shared.ridiculousDifficulty * shared.networkDefaultProofOfWorkNonceTrialsPerByte))
+           BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(shared.ridiculousDifficulty * protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
        if BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') == 0:
-           BMConfigParser().set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(shared.ridiculousDifficulty * shared.networkDefaultPayloadLengthExtraBytes))
+           BMConfigParser().set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(shared.ridiculousDifficulty * protocol.networkDefaultPayloadLengthExtraBytes))

        # The format of data stored in the pubkeys table has changed. Let's
        # clear it, and the pubkeys from inventory, so that they'll be re-downloaded.
@@ -507,8 +509,8 @@ class sqlThread(threading.Thread):
                    os._exit(0)
                self.conn.close()
                shutil.move(
-                   shared.lookupAppdataFolder() + 'messages.dat', shared.lookupExeFolder() + 'messages.dat')
-               self.conn = sqlite3.connect(shared.lookupExeFolder() + 'messages.dat')
+                   paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat')
+               self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat')
                self.conn.text_factory = str
                self.cur = self.conn.cursor()
            elif item == 'movemessagstoappdata':
@@ -523,8 +525,8 @@ class sqlThread(threading.Thread):
                    os._exit(0)
                self.conn.close()
                shutil.move(
-                   shared.lookupExeFolder() + 'messages.dat', shared.lookupAppdataFolder() + 'messages.dat')
-               self.conn = sqlite3.connect(shared.lookupAppdataFolder() + 'messages.dat')
+                   paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat')
+               self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat')
                self.conn.text_factory = str
                self.cur = self.conn.cursor()
            elif item == 'deleteandvacuume':
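The hunks above all follow the same version-gated migration pattern: check settingsversion, rewrite the affected keys, then bump the version so the block never runs twice. A minimal standalone sketch of that pattern with Python's stock ConfigParser (BMConfigParser is the project's wrapper around it; the section and key names below mirror the diff, the rest is illustrative):

import ConfigParser  # Python 2 stdlib; BMConfigParser wraps this in PyBitmessage

config = ConfigParser.SafeConfigParser()
config.add_section('bitmessagesettings')
config.set('bitmessagesettings', 'settingsversion', '4')

networkDefaultProofOfWorkNonceTrialsPerByte = 1000
networkDefaultPayloadLengthExtraBytes = 1000

# Version-gated migration: runs once, then the bumped version keeps it from rerunning.
if config.getint('bitmessagesettings', 'settingsversion') == 4:
    config.set('bitmessagesettings', 'defaultnoncetrialsperbyte',
               str(networkDefaultProofOfWorkNonceTrialsPerByte))
    config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes',
               str(networkDefaultPayloadLengthExtraBytes))
    config.set('bitmessagesettings', 'settingsversion', '5')

print config.get('bitmessagesettings', 'settingsversion')  # '5'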
 11  src/debug.py
@@ -23,6 +23,7 @@ import shared
 import sys
 import traceback
 import helper_startup
+import state
 helper_startup.loadConfig()

 # Now can be overriden from a config file, which uses standard python logging.config.fileConfig interface
@@ -36,12 +37,12 @@ def log_uncaught_exceptions(ex_cls, ex, tb):
 def configureLogging():
     have_logging = False
     try:
-        logging.config.fileConfig(os.path.join (shared.appdata, 'logging.dat'))
+        logging.config.fileConfig(os.path.join (state.appdata, 'logging.dat'))
         have_logging = True
-        print "Loaded logger configuration from %s" % (os.path.join(shared.appdata, 'logging.dat'))
+        print "Loaded logger configuration from %s" % (os.path.join(state.appdata, 'logging.dat'))
     except:
-        if os.path.isfile(os.path.join(shared.appdata, 'logging.dat')):
-            print "Failed to load logger configuration from %s, using default logging config" % (os.path.join(shared.appdata, 'logging.dat'))
+        if os.path.isfile(os.path.join(state.appdata, 'logging.dat')):
+            print "Failed to load logger configuration from %s, using default logging config" % (os.path.join(state.appdata, 'logging.dat'))
             print sys.exc_info()
         else:
             # no need to confuse the user if the logger config is missing entirely
@@ -70,7 +71,7 @@ def configureLogging():
                 'class': 'logging.handlers.RotatingFileHandler',
                 'formatter': 'default',
                 'level': log_level,
-                'filename': shared.appdata + 'debug.log',
+                'filename': state.appdata + 'debug.log',
                 'maxBytes': 2097152, # 2 MiB
                 'backupCount': 1,
                 'encoding': 'UTF-8',
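The fallback branch of configureLogging() builds a rotating-file configuration when no logging.dat overrides it. That default can be reproduced with the standard library alone; a minimal sketch using logging.config.dictConfig with the same rotating-file parameters, where the temporary directory stands in for state.appdata and the formatter string is illustrative rather than the project's exact one:

import logging
import logging.config
import os
import tempfile

appdata = tempfile.mkdtemp() + os.sep  # stand-in for state.appdata

logging.config.dictConfig({
    'version': 1,
    'formatters': {
        'default': {'format': '%(asctime)s - %(levelname)s - %(message)s'},
    },
    'handlers': {
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'default',
            'level': 'DEBUG',
            'filename': appdata + 'debug.log',
            'maxBytes': 2097152,  # 2 MiB, as in the hunk above
            'backupCount': 1,
            'encoding': 'UTF-8',
        },
    },
    'root': {'level': 'DEBUG', 'handlers': ['file']},
})

logging.getLogger(__name__).debug('logging configured')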
@@ -7,12 +7,13 @@ import time
 from configparser import BMConfigParser
 from debug import logger
 import socks
+import state

 def knownNodes():
     try:
         # We shouldn't have to use the shared.knownNodesLock because this had
         # better be the only thread accessing knownNodes right now.
-        pickleFile = open(shared.appdata + 'knownnodes.dat', 'rb')
+        pickleFile = open(state.appdata + 'knownnodes.dat', 'rb')
         loadedKnownNodes = pickle.load(pickleFile)
         pickleFile.close()
         # The old format of storing knownNodes was as a 'host: (port, time)'
@@ -28,7 +29,7 @@ def knownNodes():
                     peer, lastseen = node_tuple
                     shared.knownNodes[stream][peer] = lastseen
     except:
-        shared.knownNodes = defaultKnownNodes.createDefaultKnownNodes(shared.appdata)
+        shared.knownNodes = defaultKnownNodes.createDefaultKnownNodes(state.appdata)
     # your own onion address, if setup
     if BMConfigParser().has_option('bitmessagesettings', 'onionhostname') and ".onion" in BMConfigParser().get('bitmessagesettings', 'onionhostname'):
         shared.knownNodes[1][shared.Peer(BMConfigParser().get('bitmessagesettings', 'onionhostname'), BMConfigParser().getint('bitmessagesettings', 'onionport'))] = int(time.time())
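This loader and the singleCleaner hunk earlier work on the same on-disk structure: a dict of streams mapping Peer tuples to last-seen timestamps, pickled to knownnodes.dat under the application data directory. A minimal round-trip sketch; the Peer namedtuple only mirrors shared.Peer, and the host, port and file path are placeholders:

import collections
import pickle
import time

Peer = collections.namedtuple('Peer', ['host', 'port'])  # mirrors shared.Peer

knownNodes = {1: {Peer('127.0.0.1', 8444): int(time.time())}}

# Write, as singleCleaner does when needToWriteKnownNodesToDisk is set.
with open('knownnodes.dat', 'wb') as output:
    pickle.dump(knownNodes, output)

# Read back, as helper_bootstrap.knownNodes() does at startup.
with open('knownnodes.dat', 'rb') as pickleFile:
    loaded = pickle.load(pickleFile)

print loaded[1]  # {Peer(host='127.0.0.1', port=8444): <timestamp>}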
@@ -10,6 +10,8 @@ import platform
 from distutils.version import StrictVersion

 from namecoin import ensureNamecoinOptions
+import paths
+import state

 storeConfigFilesInSameDirectoryAsProgramByDefault = False # The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.

@@ -25,31 +27,31 @@ def _loadTrustedPeer():
     shared.trustedPeer = shared.Peer(host, int(port))

 def loadConfig():
-    if shared.appdata:
-        BMConfigParser().read(shared.appdata + 'keys.dat')
-        #shared.appdata must have been specified as a startup option.
+    if state.appdata:
+        BMConfigParser().read(state.appdata + 'keys.dat')
+        #state.appdata must have been specified as a startup option.
         try:
             BMConfigParser().get('bitmessagesettings', 'settingsversion')
-            print 'Loading config files from directory specified on startup: ' + shared.appdata
+            print 'Loading config files from directory specified on startup: ' + state.appdata
             needToCreateKeysFile = False
         except:
             needToCreateKeysFile = True

     else:
-        BMConfigParser().read(shared.lookupExeFolder() + 'keys.dat')
+        BMConfigParser().read(paths.lookupExeFolder() + 'keys.dat')
         try:
             BMConfigParser().get('bitmessagesettings', 'settingsversion')
             print 'Loading config files from same directory as program.'
             needToCreateKeysFile = False
-            shared.appdata = shared.lookupExeFolder()
+            state.appdata = paths.lookupExeFolder()
         except:
             # Could not load the keys.dat file in the program directory. Perhaps it
             # is in the appdata directory.
-            shared.appdata = shared.lookupAppdataFolder()
-            BMConfigParser().read(shared.appdata + 'keys.dat')
+            state.appdata = paths.lookupAppdataFolder()
+            BMConfigParser().read(state.appdata + 'keys.dat')
             try:
                 BMConfigParser().get('bitmessagesettings', 'settingsversion')
-                print 'Loading existing config files from', shared.appdata
+                print 'Loading existing config files from', state.appdata
                 needToCreateKeysFile = False
             except:
                 needToCreateKeysFile = True
@@ -90,9 +92,9 @@ def loadConfig():
         BMConfigParser().set(
             'bitmessagesettings', 'messagesencrypted', 'false')
         BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
-            shared.networkDefaultProofOfWorkNonceTrialsPerByte))
+            protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
         BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
-            shared.networkDefaultPayloadLengthExtraBytes))
+            protocol.networkDefaultPayloadLengthExtraBytes))
         BMConfigParser().set('bitmessagesettings', 'minimizeonclose', 'false')
         BMConfigParser().set(
             'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')
@@ -127,12 +129,12 @@ def loadConfig():
         if storeConfigFilesInSameDirectoryAsProgramByDefault:
             # Just use the same directory as the program and forget about
             # the appdata folder
-            shared.appdata = ''
+            state.appdata = ''
             print 'Creating new config files in same directory as program.'
         else:
-            print 'Creating new config files in', shared.appdata
-            if not os.path.exists(shared.appdata):
-                os.makedirs(shared.appdata)
+            print 'Creating new config files in', state.appdata
+            if not os.path.exists(state.appdata):
+                os.makedirs(state.appdata)
         if not sys.platform.startswith('win'):
             os.umask(0o077)
         shared.writeKeysFile()
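loadConfig() above resolves keys.dat in a fixed order: an explicitly supplied state.appdata first, then the program's own folder for portable mode, then the per-user application data folder. A compact sketch of that resolution order, with illustrative paths standing in for the paths module helpers:

import os

def find_keys_folder(appdata_override, exe_folder, appdata_folder):
    """Return the directory holding keys.dat, mirroring the lookup order above."""
    if appdata_override:                   # state.appdata set from a startup option
        candidates = [appdata_override]
    else:
        candidates = [exe_folder,          # portable mode: next to the program
                      appdata_folder]      # fall back to the user's appdata folder
    for folder in candidates:
        if os.path.isfile(os.path.join(folder, 'keys.dat')):
            return folder
    return None  # caller goes on to create a fresh keys.dat

print find_keys_folder('', '/opt/pybitmessage/', os.path.expanduser('~/.config/PyBitmessage/'))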
@@ -4,7 +4,6 @@ import os
 import time

 from configparser import BMConfigParser
-import shared


 #logger = logging.getLogger(__name__)
@@ -6,10 +6,11 @@ import sqlite3
 from time import strftime, localtime
 import sys
 import shared
+import state
 import string
 from binascii import hexlify

-appdata = shared.lookupAppdataFolder()
+appdata = paths.lookupAppdataFolder()

 conn = sqlite3.connect( appdata + 'messages.dat' )
 conn.text_factory = str
@@ -1,9 +1,10 @@
 import asyncore

 from http import HTTPClient
+import paths
 from tls import TLSHandshake

-# self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
+# self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')


 class HTTPSClient(HTTPClient, TLSHandshake):
@@ -6,7 +6,8 @@ import random
 import os

 from configparser import BMConfigParser
-from shared import codePath, shutdown
+import paths
+from shared import shutdown
 from debug import logger

 libAvailable = True
@@ -40,7 +41,7 @@ def initCL():
     if (len(enabledGpus) > 0):
         ctx = cl.Context(devices=enabledGpus)
         queue = cl.CommandQueue(ctx)
-        f = open(os.path.join(codePath(), "bitmsghash", 'bitmsghash.cl'), 'r')
+        f = open(os.path.join(paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r')
         fstr = ''.join(f.readlines())
         program = cl.Program(ctx, fstr).build(options="")
         logger.info("Loaded OpenCL kernel")
 70  src/paths.py  Normal file
@@ -0,0 +1,70 @@
+from os import environ, path
+import sys
+
+# When using py2exe or py2app, the variable frozen is added to the sys
+# namespace. This can be used to setup a different code path for
+# binary distributions vs source distributions.
+frozen = getattr(sys,'frozen', None)
+
+def lookupExeFolder():
+    if frozen:
+        if frozen == "macosx_app":
+            # targetdir/Bitmessage.app/Contents/MacOS/Bitmessage
+            exeFolder = path.dirname(path.dirname(path.dirname(path.dirname(sys.executable)))) + path.sep
+        else:
+            exeFolder = path.dirname(sys.executable) + path.sep
+    elif __file__:
+        exeFolder = path.dirname(__file__) + path.sep
+    else:
+        exeFolder = ''
+    return exeFolder
+
+def lookupAppdataFolder():
+    APPNAME = "PyBitmessage"
+    if "BITMESSAGE_HOME" in environ:
+        dataFolder = environ["BITMESSAGE_HOME"]
+        if dataFolder[-1] not in [os.path.sep, os.path.altsep]:
+            dataFolder += os.path.sep
+    elif sys.platform == 'darwin':
+        if "HOME" in environ:
+            dataFolder = path.join(os.environ["HOME"], "Library/Application Support/", APPNAME) + '/'
+        else:
+            stringToLog = 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
+            if 'logger' in globals():
+                logger.critical(stringToLog)
+            else:
+                print stringToLog
+            sys.exit()
+
+    elif 'win32' in sys.platform or 'win64' in sys.platform:
+        dataFolder = path.join(environ['APPDATA'].decode(sys.getfilesystemencoding(), 'ignore'), APPNAME) + path.sep
+    else:
+        from shutil import move
+        try:
+            dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
+        except KeyError:
+            dataFolder = path.join(environ["HOME"], ".config", APPNAME)
+
+        # Migrate existing data to the proper location if this is an existing install
+        try:
+            move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
+            stringToLog = "Moving data folder to %s" % (dataFolder)
+            if 'logger' in globals():
+                logger.info(stringToLog)
+            else:
+                print stringToLog
+        except IOError:
+            # Old directory may not exist.
+            pass
+        dataFolder = dataFolder + '/'
+    return dataFolder
+
+def codePath():
+    if frozen == "macosx_app":
+        codePath = environ.get("RESOURCEPATH")
+    elif frozen: # windows
+        codePath = sys._MEIPASS
+    else:
+        codePath = path.dirname(__file__)
+    return codePath
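The three helpers in the new module are what the rest of the diff switches to: lookupExeFolder() for the folder the program or frozen bundle runs from, lookupAppdataFolder() for the per-user data folder (with a BITMESSAGE_HOME override on the first branch), and codePath() for where bundled resources such as the bitmsghash sources live. A small usage sketch, assuming paths.py is on the import path and the script runs from source on a Unix-like system:

import os

import paths  # the new module added above

exe_folder = paths.lookupExeFolder()
code_path = paths.codePath()
data_folder = paths.lookupAppdataFolder()  # e.g. ~/.config/PyBitmessage/ on Linux

print exe_folder
print code_path
print os.path.join(code_path, 'bitmsghash', 'bitmsghash.cl')  # how openclpow locates its kernel
print data_folder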
@@ -8,6 +8,7 @@ import sys
 import time
 from configparser import BMConfigParser
 from debug import logger
+import paths
 import shared
 import openclpow
 import tr
@@ -169,15 +170,15 @@ def buildCPoW():

    if bmpow is not None:
        return
-   if shared.frozen is not None:
+   if paths.frozen is not None:
        notifyBuild(False)
        return
    if sys.platform in ["win32", "win64"]:
        notifyBuild(False)
        return
    try:
-       call(["make", "-C", os.path.join(shared.codePath(), "bitmsghash")])
-       if os.path.exists(os.path.join(shared.codePath(), "bitmsghash", "bitmsghash.so")):
+       call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash")])
+       if os.path.exists(os.path.join(paths.codePath(), "bitmsghash", "bitmsghash.so")):
            init()
            notifyBuild(True)
        else:
@@ -208,7 +209,7 @@ def run(target, initialHash):
            raise
        except:
            pass # fallback
-   if shared.frozen == "macosx_app" or not shared.frozen:
+   if paths.frozen == "macosx_app" or not paths.frozen:
        # on my (Peter Surda) Windows 10, Windows Defender
        # does not like this and fights with PyBitmessage
        # over CPU, resulting in very slow PoW
@@ -238,7 +239,7 @@ def init():
        bitmsglib = 'bitmsghash64.dll'
        try:
            # MSVS
-           bso = ctypes.WinDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
+           bso = ctypes.WinDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
            logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib)
            bmpow = bso.BitmessagePOW
            bmpow.restype = ctypes.c_ulonglong
@@ -248,7 +249,7 @@ def init():
            logger.error("C PoW test fail.", exc_info=True)
            try:
                # MinGW
-               bso = ctypes.CDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
+               bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
                logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib)
                bmpow = bso.BitmessagePOW
                bmpow.restype = ctypes.c_ulonglong
@@ -259,7 +260,7 @@ def init():
                bso = None
    else:
        try:
-           bso = ctypes.CDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
+           bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
            logger.info("Loaded C PoW DLL %s", bitmsglib)
        except:
            bso = None
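The init() hunks keep the same loading strategy and only change where the library path comes from: resolve a platform-specific filename under codePath()/bitmsghash, try ctypes, and fall back to None so the Python or OpenCL implementations can take over. A generic sketch of that pattern against the C math library rather than bitmsghash (the symbol name and result type here are illustrative, not the project's):

import ctypes
import ctypes.util

def load_native(symbol_name):
    """Try to load a native library and one symbol from it; return None on any failure."""
    libpath = ctypes.util.find_library('m')  # stand-in for .../bitmsghash/bitmsghash.so
    if libpath is None:
        return None
    try:
        lib = ctypes.CDLL(libpath)
        func = getattr(lib, symbol_name)     # like bso.BitmessagePOW above
        func.restype = ctypes.c_double       # proofofwork uses ctypes.c_ulonglong
        return func
    except (OSError, AttributeError):
        return None                          # caller falls back to a slower implementation

cos = load_native('cos')
if cos is not None:
    cos.argtypes = [ctypes.c_double]
    print cos(0.0)  # 1.0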
@@ -1,4 +1,5 @@
 import base64
+from binascii import hexlify
 import hashlib
 import random
 import socket
@@ -7,9 +8,13 @@ from struct import pack, unpack, Struct
 import sys
 import time

-from addresses import encodeVarint, decodeVarint
+from addresses import calculateInventoryHash, encodeVarint, decodeVarint, decodeAddress, varintDecodeError
 from configparser import BMConfigParser
-from state import neededPubkeys, extPort, socksIP
+from debug import logger
+from helper_sql import sqlExecute
+import highlevelcrypto
+from inventory import Inventory
+import state
 from version import softwareVersion

 #Service flags
@@ -22,6 +27,10 @@ BITFIELD_DOESACK = 1
 eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
     '>Q', random.randrange(1, 18446744073709551615))

+#If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
+networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
+networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
+
 #Compiled struct for packing/unpacking headers
 #New code should use CreatePacket instead of Header.pack
 Header = Struct('!L12sL4s')
@@ -79,11 +88,26 @@ def sslProtocolVersion():

 def checkSocksIP(host):
     try:
-        if socksIP is None or not socksIP:
-            socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
+        if state.socksIP is None or not state.socksIP:
+            state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
     except NameError:
-        socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
-    return socksIP == host
+        state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
+    return state.socksIP == host

+def isProofOfWorkSufficient(data,
+                            nonceTrialsPerByte=0,
+                            payloadLengthExtraBytes=0):
+    if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
+        nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
+    if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
+        payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
+    endOfLifeTime, = unpack('>Q', data[8:16])
+    TTL = endOfLifeTime - int(time.time())
+    if TTL < 300:
+        TTL = 300
+    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
+        :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
+    return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))

 # Packet creation
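isProofOfWorkSufficient() is the receive-side counterpart of the target calculation the singleWorker hunks compute before calling proofofwork.run(): it hashes the first 8 bytes of the object (the nonce) together with the hash of the rest, and accepts the object when that trial value is at or below the target for the object's length and TTL. A standalone sketch of the check, assuming an object laid out as 8-byte nonce, 8-byte expiry time, then payload, and brute-forcing a toy nonce with an artificially low difficulty so the loop finishes instantly:

import hashlib
import time
from struct import pack, unpack

def trial_value(data):
    # Same double-SHA512 construction as isProofOfWorkSufficient() above:
    # the nonce (first 8 bytes) is hashed together with the hash of the rest.
    return unpack('>Q', hashlib.sha512(hashlib.sha512(
        data[:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[:8])[0]

def pow_target(payload_length, ttl, nonce_trials, extra_bytes):
    length_term = payload_length + 8 + extra_bytes
    return 2 ** 64 / (nonce_trials * (length_term + (ttl * length_term) / (2 ** 16)))

# Toy object: 8-byte expiry followed by a short payload; the difficulty is far below
# the network defaults (1000/1000) purely so the search below is instantaneous.
payload = pack('>Q', int(time.time()) + 300) + 'hello'
target = pow_target(len(payload), 300, nonce_trials=1, extra_bytes=1)

nonce = 0
while trial_value(pack('>Q', nonce) + payload) > target:
    nonce += 1
print nonce, trial_value(pack('>Q', nonce) + payload) <= target

With the real defaults the same loop would need on the order of a million hashes per kilobyte, which is why the project hands the search to the C, OpenCL or multiprocess implementations in proofofwork.py.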
|
@ -117,10 +141,10 @@ def assembleVersionMessage(remoteHost, remotePort, myStreamNumber, server = Fals
|
||||||
# we have a separate extPort and
|
# we have a separate extPort and
|
||||||
# incoming over clearnet or
|
# incoming over clearnet or
|
||||||
# outgoing through clearnet
|
# outgoing through clearnet
|
||||||
if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and extPort \
|
if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and state.extPort \
|
||||||
and ((server and not checkSocksIP(remoteHost)) or \
|
and ((server and not checkSocksIP(remoteHost)) or \
|
||||||
(BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none" and not server)):
|
(BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none" and not server)):
|
||||||
payload += pack('>H', extPort)
|
payload += pack('>H', state.extPort)
|
||||||
elif checkSocksIP(remoteHost) and server: # incoming connection over Tor
|
elif checkSocksIP(remoteHost) and server: # incoming connection over Tor
|
||||||
payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
|
payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
|
||||||
else: # no extPort and not incoming over Tor
|
else: # no extPort and not incoming over Tor
|
||||||
|
@ -178,7 +202,7 @@ def decryptAndCheckPubkeyPayload(data, address):
|
||||||
encryptedData = data[readPosition:]
|
encryptedData = data[readPosition:]
|
||||||
|
|
||||||
# Let us try to decrypt the pubkey
|
# Let us try to decrypt the pubkey
|
||||||
toAddress, cryptorObject = neededPubkeys[tag]
|
toAddress, cryptorObject = state.neededPubkeys[tag]
|
||||||
if toAddress != address:
|
if toAddress != address:
|
||||||
logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
|
logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
|
||||||
# the only way I can think that this could happen is if someone encodes their address data two different ways.
|
# the only way I can think that this could happen is if someone encodes their address data two different ways.
|
||||||
|
@ -308,7 +332,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
|
||||||
readPosition += objectVersionLength
|
readPosition += objectVersionLength
|
||||||
streamNumber, streamNumberLength = decodeVarint(
|
streamNumber, streamNumberLength = decodeVarint(
|
||||||
data[readPosition:readPosition + 9])
|
data[readPosition:readPosition + 9])
|
||||||
if not streamNumber in streamsInWhichIAmParticipating:
|
if not streamNumber in state.streamsInWhichIAmParticipating:
|
||||||
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -331,7 +355,7 @@ def _checkAndShareMsgWithPeers(data):
|
||||||
readPosition += objectVersionLength
|
readPosition += objectVersionLength
|
||||||
streamNumber, streamNumberLength = decodeVarint(
|
streamNumber, streamNumberLength = decodeVarint(
|
||||||
data[readPosition:readPosition + 9])
|
data[readPosition:readPosition + 9])
|
||||||
if not streamNumber in streamsInWhichIAmParticipating:
|
if not streamNumber in state.streamsInWhichIAmParticipating:
|
||||||
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
||||||
return
|
return
|
||||||
readPosition += streamNumberLength
|
readPosition += streamNumberLength
|
||||||
|
@ -362,7 +386,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
|
||||||
readPosition += addressVersionLength
|
readPosition += addressVersionLength
|
||||||
streamNumber, streamNumberLength = decodeVarint(
|
streamNumber, streamNumberLength = decodeVarint(
|
||||||
data[readPosition:readPosition + 10])
|
data[readPosition:readPosition + 10])
|
||||||
if not streamNumber in streamsInWhichIAmParticipating:
|
if not streamNumber in state.streamsInWhichIAmParticipating:
|
||||||
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
||||||
return
|
return
|
||||||
readPosition += streamNumberLength
|
readPosition += streamNumberLength
|
||||||
|
@ -393,7 +417,7 @@ def _checkAndSharePubkeyWithPeers(data):
|
||||||
streamNumber, varintLength = decodeVarint(
|
streamNumber, varintLength = decodeVarint(
|
||||||
data[readPosition:readPosition + 10])
|
data[readPosition:readPosition + 10])
|
||||||
readPosition += varintLength
|
readPosition += varintLength
|
||||||
if not streamNumber in streamsInWhichIAmParticipating:
|
if not streamNumber in state.streamsInWhichIAmParticipating:
|
||||||
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
||||||
return
|
return
|
||||||
if addressVersion >= 4:
|
if addressVersion >= 4:
|
||||||
|
@ -430,7 +454,7 @@ def _checkAndShareBroadcastWithPeers(data):
|
||||||
if broadcastVersion >= 2:
|
if broadcastVersion >= 2:
|
||||||
streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
|
streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
|
||||||
readPosition += streamNumberLength
|
readPosition += streamNumberLength
|
||||||
if not streamNumber in streamsInWhichIAmParticipating:
|
if not streamNumber in state.streamsInWhichIAmParticipating:
|
||||||
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
|
||||||
return
|
return
|
||||||
if broadcastVersion >= 3:
|
if broadcastVersion >= 3:
|
||||||
|
@ -452,3 +476,10 @@ def _checkAndShareBroadcastWithPeers(data):
|
||||||
# Now let's queue it to be processed ourselves.
|
# Now let's queue it to be processed ourselves.
|
||||||
objectProcessorQueue.put((objectType,data))
|
objectProcessorQueue.put((objectType,data))
|
||||||
|
|
||||||
|
# If you want to command all of the sendDataThreads to do something, like shutdown or send some data, this
|
||||||
|
# function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are
|
||||||
|
# responsible for putting their queue into (and out of) the sendDataQueues list.
|
||||||
|
def broadcastToSendDataQueues(data):
|
||||||
|
# logger.debug('running broadcastToSendDataQueues')
|
||||||
|
for q in state.sendDataQueues:
|
||||||
|
q.put(data)
|
||||||
|
|
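broadcastToSendDataQueues() is the producer half of the registry shown in the sendDataThread hunks at the top of this diff: every registered per-connection queue receives a copy of the (stream, command, data) tuple and each thread decides whether the command applies to it. A standalone sketch of that fan-out, with illustrative names in place of state.sendDataQueues; the broadcast iterates a snapshot of the list so workers can unregister concurrently:

import Queue
import threading

sendDataQueues = []  # stands in for state.sendDataQueues

def broadcastToSendDataQueues(data):
    for q in list(sendDataQueues):  # snapshot: workers may remove themselves meanwhile
        q.put(data)

def sendDataWorker(q, label):
    while True:
        deststream, command, payload = q.get()
        if command == 'shutdown':
            break
        print label, 'handling', command  # e.g. the periodic pong from singleCleaner
    sendDataQueues.remove(q)

threads = []
for label in ('peerA', 'peerB'):
    q = Queue.Queue()
    sendDataQueues.append(q)  # registered before the thread starts, as in __init__ above
    t = threading.Thread(target=sendDataWorker, args=(q, label))
    t.start()
    threads.append(t)

broadcastToSendDataQueues((0, 'pong', 'no data'))      # as singleCleaner does periodically
broadcastToSendDataQueues((0, 'shutdown', 'no data'))
for t in threads:
    t.join()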
138
src/shared.py
138
src/shared.py
@@ -8,14 +8,11 @@ useVeryEasyProofOfWorkForTesting = False # If you set this to True while on the
 
 
 # Libraries.
-import base64
 import collections
 import os
 import pickle
 import Queue
-import random
 from multiprocessing import active_children, Queue as mpQueue, Lock as mpLock
-import socket
 import sys
 import stat
 import threading
@@ -36,6 +33,8 @@ import shared
 from helper_sql import *
 from helper_threading import *
 from inventory import Inventory
+import protocol
+import state
 
 
 myECCryptorObjects = {}
@@ -52,9 +51,7 @@ parserLock = mpLock()
 addressGeneratorQueue = Queue.Queue()
 knownNodesLock = threading.Lock()
 knownNodes = {}
-sendDataQueues = [] #each sendData thread puts its queue in this list.
 printLock = threading.Lock()
-appdata = '' #holds the location of the application data storage directory
 statusIconColor = 'red'
 connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender threads won't connect to the same remote node twice.
 shutdown = 0 #Set to 1 by the doCleanShutdown function. Used to tell the proof of work worker threads to exit.
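sendDataQueues and appdata are not dropped; they reappear in state.py later in this changeset, and the code touched below is switched to reference them through state. A minimal sketch of the new access pattern (sendDataThreadQueue is a placeholder name, not an identifier from this commit):

import Queue

import state

sendDataThreadQueue = Queue.Queue()                # placeholder for a sendDataThread's queue
state.sendDataQueues.append(sendDataThreadQueue)   # a thread registering its queue
knownNodesPath = state.appdata + 'knownnodes.dat'  # how doCleanShutdown now builds the path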
@@ -86,27 +83,17 @@ daemon = False
 needToWriteKnownNodesToDisk = False # If True, the singleCleaner will write it to disk eventually.
 maximumLengthOfTimeToBotherResendingMessages = 0
 objectProcessorQueue = ObjectProcessorQueue() # receiveDataThreads dump objects they hear on the network into this queue to be processed.
-streamsInWhichIAmParticipating = {}
 timeOffsetWrongCount = 0
 
 # sanity check, prevent doing ridiculous PoW
 # 20 million PoWs equals approximately 2 days on dev's dual R9 290
 ridiculousDifficulty = 20000000
-
-#If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
-networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
-networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
 
 # Remember here the RPC port read from namecoin.conf so we can restore to
 # it as default whenever the user changes the "method" selection for
 # namecoin integration to "namecoind".
 namecoinDefaultRpcPort = "8336"
 
-# When using py2exe or py2app, the variable frozen is added to the sys
-# namespace. This can be used to setup a different code path for
-# binary distributions vs source distributions.
-frozen = getattr(sys,'frozen', None)
-
 # If the trustedpeer option is specified in keys.dat then this will
 # contain a Peer which will be connected to instead of using the
 # addresses advertised by other peers. The client will only connect to
@@ -119,68 +106,6 @@ frozen = getattr(sys,'frozen', None)
 # security.
 trustedPeer = None
 
-def lookupExeFolder():
-    if frozen:
-        if frozen == "macosx_app":
-            # targetdir/Bitmessage.app/Contents/MacOS/Bitmessage
-            exeFolder = path.dirname(path.dirname(path.dirname(path.dirname(sys.executable)))) + path.sep
-        else:
-            exeFolder = path.dirname(sys.executable) + path.sep
-    elif __file__:
-        exeFolder = path.dirname(__file__) + path.sep
-    else:
-        exeFolder = ''
-    return exeFolder
-
-def lookupAppdataFolder():
-    APPNAME = "PyBitmessage"
-    if "BITMESSAGE_HOME" in environ:
-        dataFolder = environ["BITMESSAGE_HOME"]
-        if dataFolder[-1] not in [os.path.sep, os.path.altsep]:
-            dataFolder += os.path.sep
-    elif sys.platform == 'darwin':
-        if "HOME" in environ:
-            dataFolder = path.join(os.environ["HOME"], "Library/Application Support/", APPNAME) + '/'
-        else:
-            stringToLog = 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
-            if 'logger' in globals():
-                logger.critical(stringToLog)
-            else:
-                print stringToLog
-            sys.exit()
-
-    elif 'win32' in sys.platform or 'win64' in sys.platform:
-        dataFolder = path.join(environ['APPDATA'].decode(sys.getfilesystemencoding(), 'ignore'), APPNAME) + path.sep
-    else:
-        from shutil import move
-        try:
-            dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
-        except KeyError:
-            dataFolder = path.join(environ["HOME"], ".config", APPNAME)
-
-        # Migrate existing data to the proper location if this is an existing install
-        try:
-            move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
-            stringToLog = "Moving data folder to %s" % (dataFolder)
-            if 'logger' in globals():
-                logger.info(stringToLog)
-            else:
-                print stringToLog
-        except IOError:
-            # Old directory may not exist.
-            pass
-        dataFolder = dataFolder + '/'
-    return dataFolder
-
-def codePath():
-    if frozen == "macosx_app":
-        codePath = os.environ.get("RESOURCEPATH")
-    elif frozen: # windows
-        codePath = sys._MEIPASS
-    else:
-        codePath = os.path.dirname(__file__)
-    return codePath
-
 def isAddressInMyAddressBook(address):
     queryreturn = sqlQuery(
         '''select address from addressbook where address=?''',
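The three path helpers above leave shared.py without a replacement in this file; the surrounding refactoring suggests they move into a dedicated paths module, so callers would import them from there instead. A minimal sketch of that expectation (the module name and import style are assumptions; only the function names come from the removed code):

from paths import lookupExeFolder, lookupAppdataFolder, codePath

appdataDir = lookupAppdataFolder()  # e.g. ~/.config/PyBitmessage/ on Linux, per the removed logic
exeDir = lookupExeFolder()          # directory holding the executable (or this source tree)
resourceDir = codePath()            # where bundled resources live for frozen builds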
@@ -236,7 +161,7 @@ def reloadMyAddressHashes():
     myAddressesByTag.clear()
     #myPrivateKeys.clear()
 
-    keyfileSecure = checkSensitiveFilePermissions(appdata + 'keys.dat')
+    keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat')
     configSections = BMConfigParser().sections()
     hasEnabledKeys = False
     for addressInKeysFile in configSections:
@@ -262,7 +187,7 @@ def reloadMyAddressHashes():
            logger.error('Error in reloadMyAddressHashes: Can\'t handle address versions other than 2, 3, or 4.\n')
 
     if not keyfileSecure:
-        fixSensitiveFilePermissions(appdata + 'keys.dat', hasEnabledKeys)
+        fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys)
 
 def reloadBroadcastSendersForWhichImWatching():
     broadcastSendersForWhichImWatching.clear()
@@ -286,21 +211,6 @@ def reloadBroadcastSendersForWhichImWatching():
            privEncryptionKey = doubleHashOfAddressData[:32]
            MyECSubscriptionCryptorObjects[tag] = highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
 
-def isProofOfWorkSufficient(data,
-                            nonceTrialsPerByte=0,
-                            payloadLengthExtraBytes=0):
-    if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
-        nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
-    if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
-        payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
-    endOfLifeTime, = unpack('>Q', data[8:16])
-    TTL = endOfLifeTime - int(time.time())
-    if TTL < 300:
-        TTL = 300
-    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
-        :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
-    return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))
-
 def doCleanShutdown():
     global shutdown, thisapp
     shutdown = 1 #Used to tell proof of work worker threads and the objectProcessorThread to exit.
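isProofOfWorkSufficient also leaves shared.py; the caller updated further down (checkAndShareObjectWithPeers) now goes through protocol.isProofOfWorkSufficient, so the check presumably moves to protocol.py together with the two network-default constants removed earlier. The acceptance rule itself is unchanged: the 8-byte value unpacked from the double SHA-512 of the object must not exceed 2**64 / (nonceTrialsPerByte * (len(data) + payloadLengthExtraBytes + TTL*(len(data) + payloadLengthExtraBytes)/2**16)), with TTL clamped to at least 300 seconds. A minimal sketch of just the target computation, using the relocated 1000/1000 network defaults (the helper name is illustrative, not part of the codebase):

def powTarget(payloadLength, ttl, nonceTrialsPerByte=1000, payloadLengthExtraBytes=1000):
    # Same arithmetic as the removed isProofOfWorkSufficient, minus the hashing and unpacking.
    if ttl < 300:
        ttl = 300
    effectiveLength = payloadLength + payloadLengthExtraBytes
    return 2 ** 64 / (nonceTrialsPerByte * (effectiveLength + (ttl * effectiveLength) / (2 ** 16)))

# A nonce is then acceptable when the unpacked trial value is <= powTarget(len(data), TTL).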
@@ -308,7 +218,7 @@ def doCleanShutdown():
         parserInputQueue.put(None, False)
     except Queue.Full:
         pass
-    broadcastToSendDataQueues((0, 'shutdown', 'no data'))
+    protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data'))
     objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
     for thread in threading.enumerate():
         if thread.isAlive() and isinstance(thread, StoppableThread):
|
@ -316,7 +226,7 @@ def doCleanShutdown():
|
||||||
|
|
||||||
knownNodesLock.acquire()
|
knownNodesLock.acquire()
|
||||||
UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
|
UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
|
||||||
output = open(appdata + 'knownnodes.dat', 'wb')
|
output = open(state.appdata + 'knownnodes.dat', 'wb')
|
||||||
logger.info('finished opening knownnodes.dat. Now pickle.dump')
|
logger.info('finished opening knownnodes.dat. Now pickle.dump')
|
||||||
pickle.dump(knownNodes, output)
|
pickle.dump(knownNodes, output)
|
||||||
logger.info('Completed pickle.dump. Closing output...')
|
logger.info('Completed pickle.dump. Closing output...')
|
||||||
|
@@ -359,14 +269,6 @@ def doCleanShutdown():
     else:
         logger.info('Core shutdown complete.')
 
-# If you want to command all of the sendDataThreads to do something, like shutdown or send some data, this
-# function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are
-# responsible for putting their queue into (and out of) the sendDataQueues list.
-def broadcastToSendDataQueues(data):
-    # logger.debug('running broadcastToSendDataQueues')
-    for q in sendDataQueues:
-        q.put(data)
-
 def fixPotentiallyInvalidUTF8Data(text):
     try:
         unicode(text,'utf-8')
@@ -554,7 +456,7 @@ def checkAndShareObjectWithPeers(data):
         logger.info('The payload length of this object is too large (%s bytes). Ignoring it.' % len(data))
         return 0
     # Let us check to make sure that the proof of work is sufficient.
-    if not isProofOfWorkSufficient(data):
+    if not protocol.isProofOfWorkSufficient(data):
         logger.info('Proof of work is insufficient.')
         return 0
 
@@ -597,7 +499,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
     readPosition += objectVersionLength
     streamNumber, streamNumberLength = decodeVarint(
         data[readPosition:readPosition + 9])
-    if not streamNumber in streamsInWhichIAmParticipating:
+    if not streamNumber in state.streamsInWhichIAmParticipating:
         logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
         return
 
@@ -609,7 +511,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
     Inventory()[inventoryHash] = (
         objectType, streamNumber, data, embeddedTime,'')
     logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
-    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
 
 
 def _checkAndShareMsgWithPeers(data):
@@ -620,7 +522,7 @@ def _checkAndShareMsgWithPeers(data):
     readPosition += objectVersionLength
     streamNumber, streamNumberLength = decodeVarint(
         data[readPosition:readPosition + 9])
-    if not streamNumber in streamsInWhichIAmParticipating:
+    if not streamNumber in state.streamsInWhichIAmParticipating:
         logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
         return
     readPosition += streamNumberLength
@@ -633,7 +535,7 @@ def _checkAndShareMsgWithPeers(data):
     Inventory()[inventoryHash] = (
         objectType, streamNumber, data, embeddedTime,'')
     logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
-    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
 
     # Now let's enqueue it to be processed ourselves.
     objectProcessorQueue.put((objectType,data))
@@ -651,7 +553,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
     readPosition += addressVersionLength
     streamNumber, streamNumberLength = decodeVarint(
         data[readPosition:readPosition + 10])
-    if not streamNumber in streamsInWhichIAmParticipating:
+    if not streamNumber in state.streamsInWhichIAmParticipating:
         logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
         return
     readPosition += streamNumberLength
@@ -666,7 +568,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
         objectType, streamNumber, data, embeddedTime,'')
     # This getpubkey request is valid. Forward to peers.
     logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
-    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
 
     # Now let's queue it to be processed ourselves.
     objectProcessorQueue.put((objectType,data))
@@ -682,7 +584,7 @@ def _checkAndSharePubkeyWithPeers(data):
     streamNumber, varintLength = decodeVarint(
         data[readPosition:readPosition + 10])
     readPosition += varintLength
-    if not streamNumber in streamsInWhichIAmParticipating:
+    if not streamNumber in state.streamsInWhichIAmParticipating:
         logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
         return
     if addressVersion >= 4:
@@ -700,7 +602,7 @@ def _checkAndSharePubkeyWithPeers(data):
         objectType, streamNumber, data, embeddedTime, tag)
     # This object is valid. Forward it to peers.
     logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
-    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
 
 
     # Now let's queue it to be processed ourselves.
@@ -719,7 +621,7 @@ def _checkAndShareBroadcastWithPeers(data):
     if broadcastVersion >= 2:
         streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
         readPosition += streamNumberLength
-        if not streamNumber in streamsInWhichIAmParticipating:
+        if not streamNumber in state.streamsInWhichIAmParticipating:
             logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
             return
     if broadcastVersion >= 3:
@@ -736,19 +638,19 @@ def _checkAndShareBroadcastWithPeers(data):
         objectType, streamNumber, data, embeddedTime, tag)
     # This object is valid. Forward it to peers.
     logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
-    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+    protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
 
     # Now let's queue it to be processed ourselves.
     objectProcessorQueue.put((objectType,data))
 
 def openKeysFile():
     if 'linux' in sys.platform:
-        subprocess.call(["xdg-open", shared.appdata + 'keys.dat'])
+        subprocess.call(["xdg-open", state.appdata + 'keys.dat'])
     else:
-        os.startfile(shared.appdata + 'keys.dat')
+        os.startfile(state.appdata + 'keys.dat')
 
 def writeKeysFile():
-    fileName = shared.appdata + 'keys.dat'
+    fileName = state.appdata + 'keys.dat'
     fileNameBak = fileName + "." + datetime.datetime.now().strftime("%Y%j%H%M%S%f") + '.bak'
     # create a backup copy to prevent the accidental loss due to the disk write failure
     try:
@@ -6,6 +6,7 @@ from multiprocessing import Process
 import os
 import sys
 import shared
+import state
 
 try:
     import fcntl # @UnresolvedImport
@@ -24,7 +25,7 @@ class singleinstance:
         self.counter = 0
         self.daemon = daemon
         self.lockPid = None
-        self.lockfile = os.path.normpath(os.path.join(shared.appdata, 'singleton%s.lock' % flavor_id))
+        self.lockfile = os.path.normpath(os.path.join(state.appdata, 'singleton%s.lock' % flavor_id))
 
         if not self.daemon and not shared.curses:
             # Tells the already running (if any) application to get focus.
@@ -1,7 +1,14 @@
 neededPubkeys = {}
+streamsInWhichIAmParticipating = {}
+sendDataQueues = [] #each sendData thread puts its queue in this list.
 
 # For UPnP
 extPort = None
 
 # for Tor hidden service
 socksIP = None
+
+# Network protocols last check failed
+networkProtocolLastFailed = {'IPv4': 0, 'IPv6': 0, 'onion': 0}
+
+appdata = '' #holds the location of the application data storage directory
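networkProtocolLastFailed is new: it appears intended to record, per protocol family (IPv4, IPv6, onion), when a connection over that family last failed, so that connection code can temporarily skip a family such as IPv6 after a network-unreachable error. No consumer is visible in this part of the diff; the sketch below only illustrates the shape such a consumer could take, and both helper names and the one-hour back-off are assumptions:

import time

import state

def recordProtocolFailure(family):
    # e.g. called when an outgoing IPv6 connect raises ENETUNREACH
    state.networkProtocolLastFailed[family] = int(time.time())

def shouldSkipProtocol(family, backoff=3600):
    # Skip a family for `backoff` seconds after its last recorded failure.
    return int(time.time()) - state.networkProtocolLastFailed.get(family, 0) < backoff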
@@ -1,7 +1,6 @@
 import os
 
 from configparser import BMConfigParser
-import shared
 
 # This is used so that the translateText function can be used when we are in daemon mode and not using any QT functions.
 class translateClass: