Fixes and refactoring
- fixes errors introduced in the earlier refactoring
- more variables moved to state.py
- path finding functions moved to paths.py
- remembers when the IPv6 network was unreachable (in the future this can be used to skip IPv6 for a while)
parent 5d2bebae28
commit ac348e4e6b
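The last bullet of the commit message is implemented in the outgoingSynSender changes further down, which now record state.networkProtocolLastFailed['IPv6'] = time.time() when an IPv6 connection attempt fails with ENETUNREACH (or the matching SOCKS5 error). Below is a minimal sketch of how that timestamp could later be used to skip IPv6 peers for a while; the helper name and the back-off window are illustrative assumptions, not part of this commit.

import time

import state  # state.networkProtocolLastFailed is filled in by outgoingSynSender

# Illustrative back-off window (not part of this commit).
IPV6_BACKOFF_SECONDS = 3600


def shouldSkipIPv6():
    # Skip IPv6 peers while the last "network unreachable" failure is recent.
    lastFailed = state.networkProtocolLastFailed.get('IPv6', 0)
    return time.time() - lastFailed < IPV6_BACKOFF_SECONDS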

src/api.py (27 changed lines)
@@ -24,6 +24,7 @@ import helper_inbox
 import helper_sent
 import hashlib
+import state
 from pyelliptic.openssl import OpenSSL
 from struct import pack

@@ -251,15 +252,15 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         elif len(params) == 3:
             label, eighteenByteRipe, totalDifficulty = params
             nonceTrialsPerByte = int(
-                shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+                protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
             payloadLengthExtraBytes = BMConfigParser().get(
                 'bitmessagesettings', 'defaultpayloadlengthextrabytes')
         elif len(params) == 4:
             label, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
             nonceTrialsPerByte = int(
-                shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+                protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
             payloadLengthExtraBytes = int(
-                shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
+                protocol.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
         else:
             raise APIError(0, 'Too many parameters!')
         label = self._decode(label, "base64")

@@ -319,15 +320,15 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         elif len(params) == 6:
             passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty = params
             nonceTrialsPerByte = int(
-                shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+                protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
             payloadLengthExtraBytes = BMConfigParser().get(
                 'bitmessagesettings', 'defaultpayloadlengthextrabytes')
         elif len(params) == 7:
             passphrase, numberOfAddresses, addressVersionNumber, streamNumber, eighteenByteRipe, totalDifficulty, smallMessageDifficulty = params
             nonceTrialsPerByte = int(
-                shared.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
+                protocol.networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty)
             payloadLengthExtraBytes = int(
-                shared.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
+                protocol.networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty)
         else:
             raise APIError(0, 'Too many parameters!')
         if len(passphrase) == 0:

@@ -450,7 +451,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         if not BMConfigParser().safeGetBoolean(address, 'chan'):
             raise APIError(25, 'Specified address is not a chan address. Use deleteAddress API call instead.')
         BMConfigParser().remove_section(address)
-        with open(shared.appdata + 'keys.dat', 'wb') as configfile:
+        with open(state.appdata + 'keys.dat', 'wb') as configfile:
             BMConfigParser().write(configfile)
         return 'success'

@@ -464,7 +465,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         if not BMConfigParser().has_section(address):
             raise APIError(13, 'Could not find this address in your keys.dat file.')
         BMConfigParser().remove_section(address)
-        with open(shared.appdata + 'keys.dat', 'wb') as configfile:
+        with open(state.appdata + 'keys.dat', 'wb') as configfile:
             BMConfigParser().write(configfile)
         shared.UISignalQueue.put(('rerenderMessagelistFromLabels',''))
         shared.UISignalQueue.put(('rerenderMessagelistToLabels',''))

@@ -837,7 +838,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         # Let us do the POW and attach it to the front
         target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
         with shared.printLock:
-            print '(For msg message via API) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes
+            print '(For msg message via API) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes
         powStartTime = time.time()
         initialHash = hashlib.sha512(encryptedPayload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)

@@ -856,7 +857,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
             objectType, toStreamNumber, encryptedPayload, int(time.time()) + TTL,'')
         with shared.printLock:
             print 'Broadcasting inv for msg(API disseminatePreEncryptedMsg command):', hexlify(inventoryHash)
-        shared.broadcastToSendDataQueues((
+        protocol.broadcastToSendDataQueues((
             toStreamNumber, 'advertiseobject', inventoryHash))

     def HandleTrashSentMessageByAckDAta(self, params):

@@ -879,8 +880,8 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
         payload = self._decode(payload, "hex")

         # Let us do the POW
-        target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
-                             8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+        target = 2 ** 64 / ((len(payload) + protocol.networkDefaultPayloadLengthExtraBytes +
+                             8) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)
         print '(For pubkey message via API) Doing proof of work...'
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)

@@ -903,7 +904,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
             objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL,'')
         with shared.printLock:
             print 'broadcasting inv within API command disseminatePubkey with hash:', hexlify(inventoryHash)
-        shared.broadcastToSendDataQueues((
+        protocol.broadcastToSendDataQueues((
             streamNumber, 'advertiseobject', inventoryHash))

     def HandleGetMessageDataByDestinationHash(self, params):

@@ -22,6 +22,7 @@ from dialog import Dialog
 from helper_sql import *

 import shared
 import ConfigParser
+from configparser import BMConfigParser
 from addresses import *
 from pyelliptic.openssl import OpenSSL

@@ -28,6 +28,7 @@ from helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenCon

 import shared
 from helper_sql import sqlQuery
+import state
 import threading

 # Classes

@@ -49,7 +50,7 @@ import helper_generic
 from helper_threading import *

 def connectToStream(streamNumber):
-    shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'
+    state.streamsInWhichIAmParticipating[streamNumber] = 'no data'
     selfInitiatedConnections[streamNumber] = {}

     if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():

@@ -146,10 +147,10 @@ class singleAPI(threading.Thread, StoppableThread):
 selfInitiatedConnections = {}

 if shared.useVeryEasyProofOfWorkForTesting:
-    shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(
-        shared.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
-    shared.networkDefaultPayloadLengthExtraBytes = int(
-        shared.networkDefaultPayloadLengthExtraBytes / 100)
+    protocol.networkDefaultProofOfWorkNonceTrialsPerByte = int(
+        protocol.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
+    protocol.networkDefaultPayloadLengthExtraBytes = int(
+        protocol.networkDefaultPayloadLengthExtraBytes / 100)

 class Main:
     def start(self, daemon=False):

@@ -76,7 +76,10 @@ from dialogs import AddAddressDialog
 from class_objectHashHolder import objectHashHolder
 from class_singleWorker import singleWorker
 from helper_generic import powQueueSize, invQueueSize
+import paths
 from proofofwork import getPowType
+import protocol
+import state
 from statusbar import BMStatusBar
 from version import softwareVersion

@@ -100,13 +103,13 @@ def change_translation(newlocale):
         pass

     qmytranslator = QtCore.QTranslator()
-    translationpath = os.path.join (shared.codePath(), 'translations', 'bitmessage_' + newlocale)
+    translationpath = os.path.join (paths.codePath(), 'translations', 'bitmessage_' + newlocale)
     qmytranslator.load(translationpath)
     QtGui.QApplication.installTranslator(qmytranslator)

     qsystranslator = QtCore.QTranslator()
-    if shared.frozen:
-        translationpath = os.path.join (shared.codePath(), 'translations', 'qt_' + newlocale)
+    if paths.frozen:
+        translationpath = os.path.join (paths.codePath(), 'translations', 'qt_' + newlocale)
     else:
         translationpath = os.path.join (str(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)), 'qt_' + newlocale)
     qsystranslator.load(translationpath)

@@ -1360,9 +1363,9 @@ class MyForm(settingsmixin.SMainWindow):
             # if the address had a known label in the address book
             if label is not None:
                 # Does a sound file exist for this particular contact?
-                if (os.path.isfile(shared.appdata + 'sounds/' + label + '.wav') or
-                        os.path.isfile(shared.appdata + 'sounds/' + label + '.mp3')):
-                    soundFilename = shared.appdata + 'sounds/' + label
+                if (os.path.isfile(state.appdata + 'sounds/' + label + '.wav') or
+                        os.path.isfile(state.appdata + 'sounds/' + label + '.mp3')):
+                    soundFilename = state.appdata + 'sounds/' + label

         # Avoid making sounds more frequently than the threshold.
         # This suppresses playing sounds repeatedly when there

@@ -1378,19 +1381,19 @@ class MyForm(settingsmixin.SMainWindow):
         if soundFilename is None:
             # the sound is for an address which exists in the address book
             if category is self.SOUND_KNOWN:
-                soundFilename = shared.appdata + 'sounds/known'
+                soundFilename = state.appdata + 'sounds/known'
             # the sound is for an unknown address
             elif category is self.SOUND_UNKNOWN:
-                soundFilename = shared.appdata + 'sounds/unknown'
+                soundFilename = state.appdata + 'sounds/unknown'
             # initial connection sound
             elif category is self.SOUND_CONNECTED:
-                soundFilename = shared.appdata + 'sounds/connected'
+                soundFilename = state.appdata + 'sounds/connected'
             # disconnected sound
             elif category is self.SOUND_DISCONNECTED:
-                soundFilename = shared.appdata + 'sounds/disconnected'
+                soundFilename = state.appdata + 'sounds/disconnected'
             # sound when the connection status becomes green
             elif category is self.SOUND_CONNECTION_GREEN:
-                soundFilename = shared.appdata + 'sounds/green'
+                soundFilename = state.appdata + 'sounds/green'

         if soundFilename is not None and play is True:
             if not self.isConnectionSound(category):

@@ -1526,7 +1529,7 @@ class MyForm(settingsmixin.SMainWindow):
     # menu button 'manage keys'
     def click_actionManageKeys(self):
         if 'darwin' in sys.platform or 'linux' in sys.platform:
-            if shared.appdata == '':
+            if state.appdata == '':
                 # reply = QtGui.QMessageBox.information(self, 'keys.dat?','You
                 # may manage your keys by editing the keys.dat file stored in
                 # the same directory as this program. It is important that you

@@ -1536,14 +1539,14 @@ class MyForm(settingsmixin.SMainWindow):

             else:
                 QtGui.QMessageBox.information(self, 'keys.dat?', _translate(
-                    "MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file.").arg(shared.appdata), QMessageBox.Ok)
+                    "MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file.").arg(state.appdata), QMessageBox.Ok)
         elif sys.platform == 'win32' or sys.platform == 'win64':
-            if shared.appdata == '':
+            if state.appdata == '':
                 reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
                     "MainWindow", "You may manage your keys by editing the keys.dat file stored in the same directory as this program. It is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)"), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
             else:
                 reply = QtGui.QMessageBox.question(self, _translate("MainWindow", "Open keys.dat?"), _translate(
-                    "MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)").arg(shared.appdata), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
+                    "MainWindow", "You may manage your keys by editing the keys.dat file stored in\n %1 \nIt is important that you back up this file. Would you like to open the file now? (Be sure to close Bitmessage before making any changes.)").arg(state.appdata), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
             if reply == QtGui.QMessageBox.Yes:
                 shared.openKeysFile()

@@ -2409,10 +2412,10 @@ class MyForm(settingsmixin.SMainWindow):
         # Demanded difficulty tab
         if float(self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) >= 1:
             BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(int(float(
-                self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+                self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
         if float(self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) >= 1:
             BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(int(float(
-                self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+                self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)))

         if self.settingsDialogInstance.ui.comboBoxOpenCL.currentText().toUtf8() != BMConfigParser().safeGet("bitmessagesettings", "opencl"):
             BMConfigParser().set('bitmessagesettings', 'opencl', str(self.settingsDialogInstance.ui.comboBoxOpenCL.currentText()))

@@ -2421,18 +2424,18 @@ class MyForm(settingsmixin.SMainWindow):

         if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) == 0:
             if BMConfigParser().get('bitmessagesettings','maxacceptablenoncetrialsperbyte') != str(int(float(
-                self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)):
+                self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)):
                 # the user changed the max acceptable total difficulty
                 acceptableDifficultyChanged = True
                 BMConfigParser().set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', str(int(float(
-                    self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+                    self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
         if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) == 0:
             if BMConfigParser().get('bitmessagesettings','maxacceptablepayloadlengthextrabytes') != str(int(float(
-                self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)):
+                self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)):
                 # the user changed the max acceptable small message difficulty
                 acceptableDifficultyChanged = True
                 BMConfigParser().set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', str(int(float(
-                    self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+                    self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * protocol.networkDefaultPayloadLengthExtraBytes)))
         if acceptableDifficultyChanged:
             # It might now be possible to send msgs which were previously marked as toodifficult.
             # Let us change them to 'msgqueued'. The singleWorker will try to send them and will again

@@ -2493,21 +2496,21 @@ class MyForm(settingsmixin.SMainWindow):
                 # startup for linux
                 pass

-        if shared.appdata != shared.lookupExeFolder() and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we are NOT using portable mode now but the user selected that we should...
+        if state.appdata != paths.lookupExeFolder() and self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we are NOT using portable mode now but the user selected that we should...
             # Write the keys.dat file to disk in the new location
             sqlStoredProcedure('movemessagstoprog')
-            with open(shared.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
+            with open(paths.lookupExeFolder() + 'keys.dat', 'wb') as configfile:
                 BMConfigParser().write(configfile)
             # Write the knownnodes.dat file to disk in the new location
             shared.knownNodesLock.acquire()
-            output = open(shared.lookupExeFolder() + 'knownnodes.dat', 'wb')
+            output = open(paths.lookupExeFolder() + 'knownnodes.dat', 'wb')
             pickle.dump(shared.knownNodes, output)
             output.close()
             shared.knownNodesLock.release()
-            os.remove(shared.appdata + 'keys.dat')
-            os.remove(shared.appdata + 'knownnodes.dat')
-            previousAppdataLocation = shared.appdata
-            shared.appdata = shared.lookupExeFolder()
+            os.remove(state.appdata + 'keys.dat')
+            os.remove(state.appdata + 'knownnodes.dat')
+            previousAppdataLocation = state.appdata
+            state.appdata = paths.lookupExeFolder()
             debug.restartLoggingInUpdatedAppdataLocation()
             try:
                 os.remove(previousAppdataLocation + 'debug.log')

@@ -2515,25 +2518,25 @@ class MyForm(settingsmixin.SMainWindow):
             except:
                 pass

-        if shared.appdata == shared.lookupExeFolder() and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we ARE using portable mode now but the user selected that we shouldn't...
-            shared.appdata = shared.lookupAppdataFolder()
-            if not os.path.exists(shared.appdata):
-                os.makedirs(shared.appdata)
+        if state.appdata == paths.lookupExeFolder() and not self.settingsDialogInstance.ui.checkBoxPortableMode.isChecked(): # If we ARE using portable mode now but the user selected that we shouldn't...
+            state.appdata = paths.lookupAppdataFolder()
+            if not os.path.exists(state.appdata):
+                os.makedirs(state.appdata)
             sqlStoredProcedure('movemessagstoappdata')
             # Write the keys.dat file to disk in the new location
             shared.writeKeysFile()
             # Write the knownnodes.dat file to disk in the new location
             shared.knownNodesLock.acquire()
-            output = open(shared.appdata + 'knownnodes.dat', 'wb')
+            output = open(state.appdata + 'knownnodes.dat', 'wb')
             pickle.dump(shared.knownNodes, output)
             output.close()
             shared.knownNodesLock.release()
-            os.remove(shared.lookupExeFolder() + 'keys.dat')
-            os.remove(shared.lookupExeFolder() + 'knownnodes.dat')
+            os.remove(paths.lookupExeFolder() + 'keys.dat')
+            os.remove(paths.lookupExeFolder() + 'knownnodes.dat')
             debug.restartLoggingInUpdatedAppdataLocation()
             try:
-                os.remove(shared.lookupExeFolder() + 'debug.log')
-                os.remove(shared.lookupExeFolder() + 'debug.log.1')
+                os.remove(paths.lookupExeFolder() + 'debug.log')
+                os.remove(paths.lookupExeFolder() + 'debug.log.1')
             except:
                 pass

@@ -3621,8 +3624,8 @@ class MyForm(settingsmixin.SMainWindow):
                 currentRow, 0).setIcon(avatarize(addressAtCurrentRow))

     def setAvatar(self, addressAtCurrentRow):
-        if not os.path.exists(shared.appdata + 'avatars/'):
-            os.makedirs(shared.appdata + 'avatars/')
+        if not os.path.exists(state.appdata + 'avatars/'):
+            os.makedirs(state.appdata + 'avatars/')
         hash = hashlib.md5(addBMIfNotPresent(addressAtCurrentRow)).hexdigest()
         extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
         # http://pyqt.sourceforge.net/Docs/PyQt4/qimagereader.html#supportedImageFormats

@@ -3633,8 +3636,8 @@ class MyForm(settingsmixin.SMainWindow):
         for ext in extensions:
             filters += [ names[ext] + ' (*.' + ext.lower() + ')' ]
             all_images_filter += [ '*.' + ext.lower() ]
-            upper = shared.appdata + 'avatars/' + hash + '.' + ext.upper()
-            lower = shared.appdata + 'avatars/' + hash + '.' + ext.lower()
+            upper = state.appdata + 'avatars/' + hash + '.' + ext.upper()
+            lower = state.appdata + 'avatars/' + hash + '.' + ext.lower()
             if os.path.isfile(lower):
                 current_files += [lower]
             elif os.path.isfile(upper):

@@ -3643,7 +3646,7 @@ class MyForm(settingsmixin.SMainWindow):
         filters[1:1] = ['All files (*.*)']
         sourcefile = QFileDialog.getOpenFileName(self, _translate("MainWindow","Set avatar..."), filter = ';;'.join(filters))
         # determine the correct filename (note that avatars don't use the suffix)
-        destination = shared.appdata + 'avatars/' + hash + '.' + sourcefile.split('.')[-1]
+        destination = state.appdata + 'avatars/' + hash + '.' + sourcefile.split('.')[-1]
         exists = QtCore.QFile.exists(destination)
         if sourcefile == '':
             # ask for removal of avatar

@@ -4021,12 +4024,12 @@ class settingsDialog(QtGui.QDialog):
         self.ui.checkBoxReplyBelow.setChecked(
             BMConfigParser().safeGetBoolean('bitmessagesettings', 'replybelow'))

-        if shared.appdata == shared.lookupExeFolder():
+        if state.appdata == paths.lookupExeFolder():
             self.ui.checkBoxPortableMode.setChecked(True)
         else:
             try:
                 import tempfile
-                file = tempfile.NamedTemporaryFile(dir=shared.lookupExeFolder(), delete=True)
+                file = tempfile.NamedTemporaryFile(dir=paths.lookupExeFolder(), delete=True)
                 file.close # should autodelete
             except:
                 self.ui.checkBoxPortableMode.setDisabled(True)

@@ -4086,15 +4089,15 @@ class settingsDialog(QtGui.QDialog):

         # Demanded difficulty tab
         self.ui.lineEditTotalDifficulty.setText(str((float(BMConfigParser().getint(
-            'bitmessagesettings', 'defaultnoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+            'bitmessagesettings', 'defaultnoncetrialsperbyte')) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
         self.ui.lineEditSmallMessageDifficulty.setText(str((float(BMConfigParser().getint(
-            'bitmessagesettings', 'defaultpayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
+            'bitmessagesettings', 'defaultpayloadlengthextrabytes')) / protocol.networkDefaultPayloadLengthExtraBytes)))

         # Max acceptable difficulty tab
         self.ui.lineEditMaxAcceptableTotalDifficulty.setText(str((float(BMConfigParser().getint(
-            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)))
         self.ui.lineEditMaxAcceptableSmallMessageDifficulty.setText(str((float(BMConfigParser().getint(
-            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / shared.networkDefaultPayloadLengthExtraBytes)))
+            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / protocol.networkDefaultPayloadLengthExtraBytes)))

         # OpenCL
         if openclpow.openclAvailable():

@@ -15,7 +15,6 @@ from messagecompose import MessageCompose
 import settingsmixin
 from networkstatus import NetworkStatus
 from blacklist import Blacklist
-import shared

 try:
     _fromUtf8 = QtCore.QString.fromUtf8

@@ -3,7 +3,7 @@ import os
 from PyQt4 import QtCore, QtGui

 from configparser import BMConfigParser
-from shared import codePath
+import paths

 class LanguageBox(QtGui.QComboBox):
     languageName = {"system": "System Settings", "eo": "Esperanto", "en_pirate": "Pirate English"}

@@ -14,7 +14,7 @@ class LanguageBox(QtGui.QComboBox):
     def populate(self):
         self.languages = []
         self.clear()
-        localesPath = os.path.join (codePath(), 'translations')
+        localesPath = os.path.join (paths.codePath(), 'translations')
         configuredLocale = "system"
         try:
             configuredLocale = BMConfigParser().get('bitmessagesettings', 'userlocale', "system")

@@ -11,9 +11,11 @@ from foldertree import AccountMixin
 from helper_sql import *
 from l10n import getTranslationLanguage
 from openclpow import openclAvailable, openclEnabled
+import paths
 from proofofwork import bmpow
 from pyelliptic.openssl import OpenSSL
 import shared
+import state
 from version import softwareVersion

 # this is BM support address going to Peter Surda

@@ -63,7 +65,7 @@ def checkHasNormalAddress():

 def createAddressIfNeeded(myapp):
     if not checkHasNormalAddress():
-        shared.addressGeneratorQueue.put(('createRandomAddress', 4, 1, str(QtGui.QApplication.translate("Support", SUPPORT_MY_LABEL)), 1, "", False, shared.networkDefaultProofOfWorkNonceTrialsPerByte, shared.networkDefaultPayloadLengthExtraBytes))
+        shared.addressGeneratorQueue.put(('createRandomAddress', 4, 1, str(QtGui.QApplication.translate("Support", SUPPORT_MY_LABEL)), 1, "", False, protocol.networkDefaultProofOfWorkNonceTrialsPerByte, protocol.networkDefaultPayloadLengthExtraBytes))
     while shared.shutdown == 0 and not checkHasNormalAddress():
         time.sleep(.2)
     myapp.rerenderComboBoxSendFrom()

@@ -104,9 +106,9 @@ def createSupportMessage(myapp):
     opensslversion = "%s (Python internal), %s (external for PyElliptic)" % (ssl.OPENSSL_VERSION, OpenSSL._lib.SSLeay_version(SSLEAY_VERSION))

     frozen = "N/A"
-    if shared.frozen:
-        frozen = shared.frozen
-    portablemode = "True" if shared.appdata == shared.lookupExeFolder() else "False"
+    if paths.frozen:
+        frozen = paths.frozen
+    portablemode = "True" if state.appdata == paths.lookupExeFolder() else "False"
     cpow = "True" if bmpow else "False"
     #cpow = QtGui.QApplication.translate("Support", cpow)
     openclpow = str(BMConfigParser().safeGet('bitmessagesettings', 'opencl')) if openclEnabled() else "None"

@@ -4,6 +4,7 @@ import os
 import shared
 from addresses import addBMIfNotPresent
 from configparser import BMConfigParser
+import state

 str_broadcast_subscribers = '[Broadcast subscribers]'
 str_chan = '[chan]'

@@ -82,8 +83,8 @@ def avatarize(address):
     extensions = ['PNG', 'GIF', 'JPG', 'JPEG', 'SVG', 'BMP', 'MNG', 'PBM', 'PGM', 'PPM', 'TIFF', 'XBM', 'XPM', 'TGA']
     # try to find a specific avatar
     for ext in extensions:
-        lower_hash = shared.appdata + 'avatars/' + hash + '.' + ext.lower()
-        upper_hash = shared.appdata + 'avatars/' + hash + '.' + ext.upper()
+        lower_hash = state.appdata + 'avatars/' + hash + '.' + ext.lower()
+        upper_hash = state.appdata + 'avatars/' + hash + '.' + ext.upper()
         if os.path.isfile(lower_hash):
             # print 'found avatar of ', address
             idcon.addFile(lower_hash)

@@ -94,8 +95,8 @@ def avatarize(address):
             return idcon
     # if we haven't found any, try to find a default avatar
     for ext in extensions:
-        lower_default = shared.appdata + 'avatars/' + 'default.' + ext.lower()
-        upper_default = shared.appdata + 'avatars/' + 'default.' + ext.upper()
+        lower_default = state.appdata + 'avatars/' + 'default.' + ext.lower()
+        upper_default = state.appdata + 'avatars/' + 'default.' + ext.upper()
         if os.path.isfile(lower_default):
             default = lower_default
             idcon.addFile(lower_default)

@@ -1,10 +1,10 @@
 from PyQt4 import uic
 import os.path
+import paths
 import sys
-from shared import codePath

 def resource_path(resFile):
-    baseDir = codePath()
+    baseDir = paths.codePath()
     for subDir in ["ui", "bitmessageqt"]:
         if os.path.isdir(os.path.join(baseDir, subDir)) and os.path.isfile(os.path.join(baseDir, subDir, resFile)):
             return os.path.join(baseDir, subDir, resFile)

@@ -77,13 +77,13 @@ class addressGenerator(threading.Thread, StoppableThread):
             if nonceTrialsPerByte == 0:
                 nonceTrialsPerByte = BMConfigParser().getint(
                     'bitmessagesettings', 'defaultnoncetrialsperbyte')
-            if nonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte:
-                nonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
+            if nonceTrialsPerByte < protocol.networkDefaultProofOfWorkNonceTrialsPerByte:
+                nonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
             if payloadLengthExtraBytes == 0:
                 payloadLengthExtraBytes = BMConfigParser().getint(
                     'bitmessagesettings', 'defaultpayloadlengthextrabytes')
-            if payloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
-                payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+            if payloadLengthExtraBytes < protocol.networkDefaultPayloadLengthExtraBytes:
+                payloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
             if command == 'createRandomAddress':
                 shared.UISignalQueue.put((
                     'updateStatusBar', tr._translate("MainWindow", "Generating one new address")))

@@ -467,7 +467,7 @@ class objectProcessor(threading.Thread):
                 toAddress, 'noncetrialsperbyte')
             requiredPayloadLengthExtraBytes = BMConfigParser().getint(
                 toAddress, 'payloadlengthextrabytes')
-            if not shared.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
+            if not protocol.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
                 logger.info('Proof of work in msg is insufficient only because it does not meet our higher requirement.')
                 return
         blockMessage = False # Gets set to True if the user shouldn't see the message according to black or white lists.

@@ -1,3 +1,4 @@
+import errno
 import threading
 import time
 import random

@@ -12,6 +13,7 @@ from class_sendDataThread import *
 from class_receiveDataThread import *
 from configparser import BMConfigParser
 from helper_threading import *
+import state

 # For each stream to which we connect, several outgoingSynSender threads
 # will exist and will collectively create 8 connections with peers.

@@ -252,12 +254,16 @@ class outgoingSynSender(threading.Thread, StoppableThread):
                     logger.debug('SOCKS5 error: %s', str(err))
                 else:
                     logger.error('SOCKS5 error: %s', str(err))
+                if err[0][0] == 4 or err[0][0] == 2:
+                    state.networkProtocolLastFailed['IPv6'] = time.time()
             except socks.Socks4Error as err:
                 logger.error('Socks4Error: ' + str(err))
             except socket.error as err:
                 if BMConfigParser().get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                     logger.error('Bitmessage MIGHT be having trouble connecting to the SOCKS server. ' + str(err))
                 else:
+                    if ":" in peer.host and err[0] == errno.ENETUNREACH:
+                        state.networkProtocolLastFailed['IPv6'] = time.time()
                     if shared.verbose >= 1:
                         logger.debug('Could NOT connect to ' + str(peer) + 'during outgoing attempt. ' + str(err))

@@ -28,8 +28,10 @@ from class_objectHashHolder import objectHashHolder
 from helper_generic import addDataPadding, isHostInPrivateIPRange
 from helper_sql import sqlQuery
 from debug import logger
+import paths
 import protocol
 from inventory import Inventory
+import state
 import tr
 from version import softwareVersion

@@ -291,7 +293,7 @@ class receiveDataThread(threading.Thread):
             if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
                 protocol.haveSSL(not self.initiatedConnection)):
                 logger.debug("Initialising TLS")
-                self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
+                self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
                 if hasattr(self.sslSock, "context"):
                     self.sslSock.context.set_ecdh_curve("secp256k1")
                 while True:

@@ -320,12 +322,12 @@ class receiveDataThread(threading.Thread):
         shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
         logger.debug('Connection fully established with ' + str(self.peer) + "\n" + \
             'The size of the connectedHostsList is now ' + str(len(shared.connectedHostsList)) + "\n" + \
-            'The length of sendDataQueues is now: ' + str(len(shared.sendDataQueues)) + "\n" + \
+            'The length of sendDataQueues is now: ' + str(len(state.sendDataQueues)) + "\n" + \
             'broadcasting addr from within connectionFullyEstablished function.')

         # Let all of our peers know about this new node.
         dataToSend = (int(time.time()), self.streamNumber, 1, self.peer.host, self.remoteNodeIncomingPort)
-        shared.broadcastToSendDataQueues((
+        protocol.broadcastToSendDataQueues((
             self.streamNumber, 'advertisepeer', dataToSend))

         self.sendaddr() # This is one large addr message to this one peer.

@@ -594,7 +596,7 @@ class receiveDataThread(threading.Thread):
                 hostDetails = (
                     timeSomeoneElseReceivedMessageFromThisNode,
                     recaddrStream, recaddrServices, hostStandardFormat, recaddrPort)
-                shared.broadcastToSendDataQueues((
+                protocol.broadcastToSendDataQueues((
                     self.streamNumber, 'advertisepeer', hostDetails))
             else:
                 timeLastReceivedMessageFromThisNode = shared.knownNodes[recaddrStream][

@@ -14,6 +14,7 @@ from class_objectHashHolder import *
 from addresses import *
 from debug import logger
 import protocol
+import state

 # Every connection to a peer has a sendDataThread (and also a
 # receiveDataThread).

@@ -22,7 +23,7 @@ class sendDataThread(threading.Thread):
     def __init__(self, sendDataThreadQueue):
         threading.Thread.__init__(self, name="sendData")
         self.sendDataThreadQueue = sendDataThreadQueue
-        shared.sendDataQueues.append(self.sendDataThreadQueue)
+        state.sendDataQueues.append(self.sendDataThreadQueue)
         self.data = ''
         self.objectHashHolderInstance = objectHashHolder(self.sendDataThreadQueue)
         self.objectHashHolderInstance.start()

@@ -102,7 +103,7 @@ class sendDataThread(threading.Thread):

     def run(self):
-        logger.debug('sendDataThread starting. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(shared.sendDataQueues)))
+        logger.debug('sendDataThread starting. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(state.sendDataQueues)))
         while True:
             deststream, command, data = self.sendDataThreadQueue.get()

@@ -190,6 +191,6 @@ class sendDataThread(threading.Thread):
             self.sock.close()
         except:
             pass
-        shared.sendDataQueues.remove(self.sendDataThreadQueue)
-        logger.info('sendDataThread ending. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(shared.sendDataQueues)))
+        state.sendDataQueues.remove(self.sendDataThreadQueue)
+        logger.info('sendDataThread ending. ID: ' + str(id(self)) + '. Number of queues in sendDataQueues: ' + str(len(state.sendDataQueues)))
         self.objectHashHolderInstance.close()

@@ -11,7 +11,8 @@ from helper_sql import *
 from helper_threading import *
 from inventory import Inventory
 from debug import logger
-from state import neededPubkeys
+import protocol
+import state

 """
 The singleCleaner class is a timer-driven thread that cleans data structures

@@ -53,7 +54,7 @@ class singleCleaner(threading.Thread, StoppableThread):
                 Inventory().flush()
                 shared.UISignalQueue.put(('updateStatusBar', ''))

-            shared.broadcastToSendDataQueues((
+            protocol.broadcastToSendDataQueues((
                 0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
             # If we are running as a daemon then we are going to fill up the UI
             # queue which will never be handled by a UI. We should clear it to

@@ -98,7 +99,7 @@ class singleCleaner(threading.Thread, StoppableThread):
             # Let us write out the knowNodes to disk if there is anything new to write out.
             if shared.needToWriteKnownNodesToDisk:
                 shared.knownNodesLock.acquire()
-                output = open(shared.appdata + 'knownnodes.dat', 'wb')
+                output = open(state.appdata + 'knownnodes.dat', 'wb')
                 try:
                     pickle.dump(shared.knownNodes, output)
                     output.close()

@@ -116,7 +117,7 @@ class singleCleaner(threading.Thread, StoppableThread):
 def resendPubkeyRequest(address):
     logger.debug('It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.')
     try:
-        del neededPubkeys[
+        del state.neededPubkeys[
             address] # We need to take this entry out of the neededPubkeys structure because the shared.workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
     except:
         pass

@@ -165,7 +165,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += pubEncryptionKey[1:]

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)

@@ -255,7 +255,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += signature

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)

@@ -345,7 +345,7 @@ class singleWorker(threading.Thread, StoppableThread):
             dataToEncrypt, hexlify(pubEncryptionKey))

         # Do the POW for this pubkey message
-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For pubkey message) Doing proof of work...')
         initialHash = hashlib.sha512(payload).digest()
         trialValue, nonce = proofofwork.run(target, initialHash)

@@ -466,7 +466,7 @@ class singleWorker(threading.Thread, StoppableThread):
             payload += highlevelcrypto.encrypt(
                 dataToEncrypt, hexlify(pubEncryptionKey))

-            target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+            target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
             logger.info('(For broadcast message) Doing proof of work...')
             shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
                 ackdata, tr._translate("MainWindow", "Doing work necessary to send broadcast..."))))

@@ -659,8 +659,8 @@ class singleWorker(threading.Thread, StoppableThread):

             # Let us fetch the amount of work required by the recipient.
             if toAddressVersionNumber == 2:
-                requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-                requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+                requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+                requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
                     ackdata, tr._translate("MainWindow", "Doing work necessary to send message.\nThere is no required difficulty for version 2 addresses like this."))))
             elif toAddressVersionNumber >= 3:

@@ -670,13 +670,13 @@ class singleWorker(threading.Thread, StoppableThread):
                 requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
                     pubkeyPayload[readPosition:readPosition + 10])
                 readPosition += varintLength
-                if requiredAverageProofOfWorkNonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte: # We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
-                    requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-                if requiredPayloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
-                    requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+                if requiredAverageProofOfWorkNonceTrialsPerByte < protocol.networkDefaultProofOfWorkNonceTrialsPerByte: # We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
+                    requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+                if requiredPayloadLengthExtraBytes < protocol.networkDefaultPayloadLengthExtraBytes:
+                    requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                 logger.debug('Using averageProofOfWorkNonceTrialsPerByte: %s and payloadLengthExtraBytes: %s.' % (requiredAverageProofOfWorkNonceTrialsPerByte, requiredPayloadLengthExtraBytes))
                 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Doing work necessary to send message.\nReceiver\'s required difficulty: %1 and %2").arg(str(float(
-                    requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)))))
+                    requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)))))
                 if status != 'forcepow':
                     if (requiredAverageProofOfWorkNonceTrialsPerByte > BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') and BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') != 0) or (requiredPayloadLengthExtraBytes > BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') and BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') != 0):
                         # The demanded difficulty is more than we are willing

@@ -684,8 +684,8 @@ class singleWorker(threading.Thread, StoppableThread):
                         sqlExecute(
                             '''UPDATE sent SET status='toodifficult' WHERE ackdata=? ''',
                             ackdata)
-                        shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do. %3").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
-                            requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)).arg(l10n.formatTimestamp()))))
+                        shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr._translate("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do. %3").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
+                            requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)).arg(l10n.formatTimestamp()))))
                         continue
             else: # if we are sending a message to ourselves or a chan..
                 logger.info('Sending a message.')

@@ -703,8 +703,8 @@ class singleWorker(threading.Thread, StoppableThread):
                     privEncryptionKeyBase58))
                 pubEncryptionKeyBase256 = unhexlify(highlevelcrypto.privToPub(
                     privEncryptionKeyHex))[1:]
-                requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
-                requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+                requiredAverageProofOfWorkNonceTrialsPerByte = protocol.networkDefaultProofOfWorkNonceTrialsPerByte
+                requiredPayloadLengthExtraBytes = protocol.networkDefaultPayloadLengthExtraBytes
                 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
                     ackdata, tr._translate("MainWindow", "Doing work necessary to send message."))))

@@ -747,9 +747,9 @@ class singleWorker(threading.Thread, StoppableThread):
                 # the receiver is in any of those lists.
                 if shared.isAddressInMyAddressBookSubscriptionsListOrWhitelist(toaddress):
                     payload += encodeVarint(
-                        shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+                        protocol.networkDefaultProofOfWorkNonceTrialsPerByte)
                     payload += encodeVarint(
-                        shared.networkDefaultPayloadLengthExtraBytes)
+                        protocol.networkDefaultPayloadLengthExtraBytes)
                 else:
                     payload += encodeVarint(BMConfigParser().getint(
                         fromaddress, 'noncetrialsperbyte'))

@@ -790,7 +790,7 @@ class singleWorker(threading.Thread, StoppableThread):
             encryptedPayload += encodeVarint(1) # msg version
             encryptedPayload += encodeVarint(toStreamNumber) + encrypted
             target = 2 ** 64 / (requiredAverageProofOfWorkNonceTrialsPerByte*(len(encryptedPayload) + 8 + requiredPayloadLengthExtraBytes + ((TTL*(len(encryptedPayload)+8+requiredPayloadLengthExtraBytes))/(2 ** 16))))
-            logger.info('(For msg message) Doing proof of work. Total required difficulty: %f. Required small message difficulty: %f.', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)
+            logger.info('(For msg message) Doing proof of work. Total required difficulty: %f. Required small message difficulty: %f.', float(requiredAverageProofOfWorkNonceTrialsPerByte) / protocol.networkDefaultProofOfWorkNonceTrialsPerByte, float(requiredPayloadLengthExtraBytes) / protocol.networkDefaultPayloadLengthExtraBytes)

             powStartTime = time.time()
             initialHash = hashlib.sha512(encryptedPayload).digest()

@@ -913,7 +913,7 @@ class singleWorker(threading.Thread, StoppableThread):
             shared.UISignalQueue.put(('updateSentItemStatusByToAddress', (
                 toAddress, tr._translate("MainWindow",'Doing work necessary to request encryption key.'))))

-            target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+            target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
             initialHash = hashlib.sha512(payload).digest()
             trialValue, nonce = proofofwork.run(target, initialHash)
             logger.info('Found proof of work ' + str(trialValue) + ' Nonce: ' + str(nonce))

@@ -966,7 +966,7 @@ class singleWorker(threading.Thread, StoppableThread):
         payload += encodeVarint(1) # msg version
         payload += encodeVarint(toStreamNumber) + ackdata

-        target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
+        target = 2 ** 64 / (protocol.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + protocol.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+protocol.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
         logger.info('(For ack message) Doing proof of work. TTL set to ' + str(TTL))

         powStartTime = time.time()
@ -8,7 +8,9 @@ import sys
|
|||
import os
|
||||
from debug import logger
|
||||
from namecoin import ensureNamecoinOptions
|
||||
import paths
|
||||
import random
|
||||
import state
|
||||
import string
|
||||
import tr#anslate
|
||||
|
||||
|
@ -23,7 +25,7 @@ class sqlThread(threading.Thread):
|
|||
threading.Thread.__init__(self, name="SQL")
|
||||
|
||||
def run(self):
|
||||
self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
|
||||
self.conn = sqlite3.connect(state.appdata + 'messages.dat')
|
||||
self.conn.text_factory = str
|
||||
self.cur = self.conn.cursor()
|
||||
|
||||
|
@ -112,9 +114,9 @@ class sqlThread(threading.Thread):
|
|||
|
||||
if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 4:
|
||||
BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
shared.networkDefaultProofOfWorkNonceTrialsPerByte))
protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
shared.networkDefaultPayloadLengthExtraBytes))
protocol.networkDefaultPayloadLengthExtraBytes))
BMConfigParser().set('bitmessagesettings', 'settingsversion', '5')

if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 5:

@@ -235,8 +237,8 @@ class sqlThread(threading.Thread):
# Raise the default required difficulty from 1 to 2
# With the change to protocol v3, this is obsolete.
if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 6:
"""if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == shared.networkDefaultProofOfWorkNonceTrialsPerByte:
shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
"""if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == protocol.networkDefaultProofOfWorkNonceTrialsPerByte:
shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(protocol.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
"""
BMConfigParser().set('bitmessagesettings', 'settingsversion', '7')

@@ -302,8 +304,8 @@ class sqlThread(threading.Thread):

# With the change to protocol version 3, reset the user-settable difficulties to 1
if BMConfigParser().getint('bitmessagesettings', 'settingsversion') == 8:
BMConfigParser().set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte))
BMConfigParser().set('bitmessagesettings','defaultpayloadlengthextrabytes', str(shared.networkDefaultPayloadLengthExtraBytes))
BMConfigParser().set('bitmessagesettings','defaultnoncetrialsperbyte', str(protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
BMConfigParser().set('bitmessagesettings','defaultpayloadlengthextrabytes', str(protocol.networkDefaultPayloadLengthExtraBytes))
previousTotalDifficulty = int(BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / 320
previousSmallMessageDifficulty = int(BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / 14000
BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(previousTotalDifficulty * 1000))

@@ -331,9 +333,9 @@ class sqlThread(threading.Thread):

# sanity check
if BMConfigParser().getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte') == 0:
BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(shared.ridiculousDifficulty * shared.networkDefaultProofOfWorkNonceTrialsPerByte))
BMConfigParser().set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(shared.ridiculousDifficulty * protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
if BMConfigParser().getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') == 0:
BMConfigParser().set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(shared.ridiculousDifficulty * shared.networkDefaultPayloadLengthExtraBytes))
BMConfigParser().set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(shared.ridiculousDifficulty * protocol.networkDefaultPayloadLengthExtraBytes))

# The format of data stored in the pubkeys table has changed. Let's
# clear it, and the pubkeys from inventory, so that they'll be re-downloaded.
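The hunks above all follow the same upgrade pattern: check the stored settingsversion, rewrite the affected options from the network-level defaults (which now live in protocol rather than shared), and bump the version exactly once. A minimal sketch of that pattern, assuming a ConfigParser-style object and hypothetical default values, not code from this commit:

import ConfigParser

def upgrade_to_version_9(config, default_nonce_trials=1000, default_extra_bytes=1000):
    # Hypothetical illustration of one migration step; the version numbers
    # and defaults here are assumptions, not part of the commit.
    if config.getint('bitmessagesettings', 'settingsversion') == 8:
        config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(default_nonce_trials))
        config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(default_extra_bytes))
        config.set('bitmessagesettings', 'settingsversion', '9')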
|
@ -507,8 +509,8 @@ class sqlThread(threading.Thread):
|
|||
os._exit(0)
|
||||
self.conn.close()
|
||||
shutil.move(
|
||||
shared.lookupAppdataFolder() + 'messages.dat', shared.lookupExeFolder() + 'messages.dat')
|
||||
self.conn = sqlite3.connect(shared.lookupExeFolder() + 'messages.dat')
|
||||
paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat')
|
||||
self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat')
|
||||
self.conn.text_factory = str
|
||||
self.cur = self.conn.cursor()
|
||||
elif item == 'movemessagstoappdata':
|
||||
|
@ -523,8 +525,8 @@ class sqlThread(threading.Thread):
|
|||
os._exit(0)
|
||||
self.conn.close()
|
||||
shutil.move(
|
||||
shared.lookupExeFolder() + 'messages.dat', shared.lookupAppdataFolder() + 'messages.dat')
|
||||
self.conn = sqlite3.connect(shared.lookupAppdataFolder() + 'messages.dat')
|
||||
paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat')
|
||||
self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat')
|
||||
self.conn.text_factory = str
|
||||
self.cur = self.conn.cursor()
|
||||
elif item == 'deleteandvacuume':
|
||||
|
|
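Both branches move messages.dat between the program folder and the data folder using the same close-move-reconnect sequence. A hedged sketch of that sequence, with illustrative directory arguments standing in for the paths.lookupExeFolder()/paths.lookupAppdataFolder() results:

import shutil
import sqlite3

def relocate_messages_db(conn, old_dir, new_dir):
    # The open connection must be closed before the file is moved,
    # then a fresh connection is opened at the new location.
    conn.close()
    shutil.move(old_dir + 'messages.dat', new_dir + 'messages.dat')
    conn = sqlite3.connect(new_dir + 'messages.dat')
    conn.text_factory = str
    return conn, conn.cursor()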
11
src/debug.py
@@ -23,6 +23,7 @@ import shared
import sys
import traceback
import helper_startup
import state

helper_startup.loadConfig()

# Now can be overriden from a config file, which uses standard python logging.config.fileConfig interface

@@ -36,12 +37,12 @@ def log_uncaught_exceptions(ex_cls, ex, tb):
def configureLogging():
have_logging = False
try:
logging.config.fileConfig(os.path.join (shared.appdata, 'logging.dat'))
logging.config.fileConfig(os.path.join (state.appdata, 'logging.dat'))
have_logging = True
print "Loaded logger configuration from %s" % (os.path.join(shared.appdata, 'logging.dat'))
print "Loaded logger configuration from %s" % (os.path.join(state.appdata, 'logging.dat'))
except:
if os.path.isfile(os.path.join(shared.appdata, 'logging.dat')):
print "Failed to load logger configuration from %s, using default logging config" % (os.path.join(shared.appdata, 'logging.dat'))
if os.path.isfile(os.path.join(state.appdata, 'logging.dat')):
print "Failed to load logger configuration from %s, using default logging config" % (os.path.join(state.appdata, 'logging.dat'))
print sys.exc_info()
else:
# no need to confuse the user if the logger config is missing entirely

@@ -70,7 +71,7 @@ def configureLogging():
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'level': log_level,
'filename': shared.appdata + 'debug.log',
'filename': state.appdata + 'debug.log',
'maxBytes': 2097152, # 2 MiB
'backupCount': 1,
'encoding': 'UTF-8',
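The logging setup in debug.py first tries a user-supplied logging.dat in the data directory via logging.config.fileConfig, and only falls back to its built-in rotating-file configuration when that fails. A compact sketch of the same try-then-fallback shape, with 'appdata' standing in for state.appdata and the fallback dict heavily abbreviated:

import logging
import logging.config
import os

def configure_logging(appdata):
    # Prefer the user's logging.dat; otherwise fall back to a rotating
    # debug.log in the same data directory.
    try:
        logging.config.fileConfig(os.path.join(appdata, 'logging.dat'))
        return True
    except Exception:
        logging.config.dictConfig({
            'version': 1,
            'handlers': {'file': {
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': os.path.join(appdata, 'debug.log'),
                'maxBytes': 2097152,   # 2 MiB
                'backupCount': 1,
            }},
            'root': {'level': 'DEBUG', 'handlers': ['file']},
        })
        return False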
@@ -7,12 +7,13 @@ import time
from configparser import BMConfigParser
from debug import logger
import socks
import state

def knownNodes():
try:
# We shouldn't have to use the shared.knownNodesLock because this had
# better be the only thread accessing knownNodes right now.
pickleFile = open(shared.appdata + 'knownnodes.dat', 'rb')
pickleFile = open(state.appdata + 'knownnodes.dat', 'rb')
loadedKnownNodes = pickle.load(pickleFile)
pickleFile.close()
# The old format of storing knownNodes was as a 'host: (port, time)'

@@ -28,7 +29,7 @@ def knownNodes():
peer, lastseen = node_tuple
shared.knownNodes[stream][peer] = lastseen
except:
shared.knownNodes = defaultKnownNodes.createDefaultKnownNodes(shared.appdata)
shared.knownNodes = defaultKnownNodes.createDefaultKnownNodes(state.appdata)
# your own onion address, if setup
if BMConfigParser().has_option('bitmessagesettings', 'onionhostname') and ".onion" in BMConfigParser().get('bitmessagesettings', 'onionhostname'):
shared.knownNodes[1][shared.Peer(BMConfigParser().get('bitmessagesettings', 'onionhostname'), BMConfigParser().getint('bitmessagesettings', 'onionport'))] = int(time.time())
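knownnodes.dat is a pickled mapping of stream number to {Peer: last-seen timestamp}; if loading it fails for any reason, the bootstrap code falls back to the bundled defaults. A minimal sketch of that load-or-fallback step, where create_defaults stands in for defaultKnownNodes.createDefaultKnownNodes and is an assumption here:

import pickle

def load_known_nodes(appdata, create_defaults):
    # Return {stream: {Peer: last_seen_timestamp}}, or regenerate defaults
    # when the file is missing or unreadable.
    try:
        with open(appdata + 'knownnodes.dat', 'rb') as pickle_file:
            return pickle.load(pickle_file)
    except Exception:
        return create_defaults(appdata)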
@@ -10,6 +10,8 @@ import platform
from distutils.version import StrictVersion

from namecoin import ensureNamecoinOptions
import paths
import state

storeConfigFilesInSameDirectoryAsProgramByDefault = False # The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.

@@ -25,31 +27,31 @@ def _loadTrustedPeer():
shared.trustedPeer = shared.Peer(host, int(port))

def loadConfig():
if shared.appdata:
BMConfigParser().read(shared.appdata + 'keys.dat')
#shared.appdata must have been specified as a startup option.
if state.appdata:
BMConfigParser().read(state.appdata + 'keys.dat')
#state.appdata must have been specified as a startup option.
try:
BMConfigParser().get('bitmessagesettings', 'settingsversion')
print 'Loading config files from directory specified on startup: ' + shared.appdata
print 'Loading config files from directory specified on startup: ' + state.appdata
needToCreateKeysFile = False
except:
needToCreateKeysFile = True

else:
BMConfigParser().read(shared.lookupExeFolder() + 'keys.dat')
BMConfigParser().read(paths.lookupExeFolder() + 'keys.dat')
try:
BMConfigParser().get('bitmessagesettings', 'settingsversion')
print 'Loading config files from same directory as program.'
needToCreateKeysFile = False
shared.appdata = shared.lookupExeFolder()
state.appdata = paths.lookupExeFolder()
except:
# Could not load the keys.dat file in the program directory. Perhaps it
# is in the appdata directory.
shared.appdata = shared.lookupAppdataFolder()
BMConfigParser().read(shared.appdata + 'keys.dat')
state.appdata = paths.lookupAppdataFolder()
BMConfigParser().read(state.appdata + 'keys.dat')
try:
BMConfigParser().get('bitmessagesettings', 'settingsversion')
print 'Loading existing config files from', shared.appdata
print 'Loading existing config files from', state.appdata
needToCreateKeysFile = False
except:
needToCreateKeysFile = True

@@ -90,9 +92,9 @@ def loadConfig():
BMConfigParser().set(
'bitmessagesettings', 'messagesencrypted', 'false')
BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
shared.networkDefaultProofOfWorkNonceTrialsPerByte))
protocol.networkDefaultProofOfWorkNonceTrialsPerByte))
BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
shared.networkDefaultPayloadLengthExtraBytes))
protocol.networkDefaultPayloadLengthExtraBytes))
BMConfigParser().set('bitmessagesettings', 'minimizeonclose', 'false')
BMConfigParser().set(
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')

@@ -127,12 +129,12 @@ def loadConfig():
if storeConfigFilesInSameDirectoryAsProgramByDefault:
# Just use the same directory as the program and forget about
# the appdata folder
shared.appdata = ''
state.appdata = ''
print 'Creating new config files in same directory as program.'
else:
print 'Creating new config files in', shared.appdata
if not os.path.exists(shared.appdata):
os.makedirs(shared.appdata)
print 'Creating new config files in', state.appdata
if not os.path.exists(state.appdata):
os.makedirs(state.appdata)
if not sys.platform.startswith('win'):
os.umask(0o077)
shared.writeKeysFile()
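loadConfig searches for keys.dat in three places in order: a data directory given at startup, the program's own folder (portable mode), and the per-user appdata folder, creating a fresh config with a restrictive umask when none is found. An illustrative sketch of that search order, with the three arguments standing in for state.appdata, paths.lookupExeFolder() and paths.lookupAppdataFolder():

import os
import sys

def locate_keys_file(startup_appdata, exe_folder, user_appdata):
    # Try each candidate directory in turn.
    for candidate in (startup_appdata, exe_folder, user_appdata):
        if candidate and os.path.isfile(candidate + 'keys.dat'):
            return candidate + 'keys.dat'
    # Nothing found: prepare a fresh config, restricting permissions off Windows.
    if not sys.platform.startswith('win'):
        os.umask(0o077)
    if not os.path.exists(user_appdata):
        os.makedirs(user_appdata)
    return user_appdata + 'keys.dat'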
@@ -4,7 +4,6 @@ import os
import time

from configparser import BMConfigParser
import shared

#logger = logging.getLogger(__name__)
@@ -6,10 +6,11 @@ import sqlite3
from time import strftime, localtime
import sys
import shared
import state
import string
from binascii import hexlify

appdata = shared.lookupAppdataFolder()
appdata = paths.lookupAppdataFolder()

conn = sqlite3.connect( appdata + 'messages.dat' )
conn.text_factory = str
@@ -1,9 +1,10 @@
import asyncore

from http import HTTPClient
import paths
from tls import TLSHandshake

# self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(shared.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(shared.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')
# self.sslSock = ssl.wrap_socket(self.sock, keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem'), certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), server_side = not self.initiatedConnection, ssl_version=ssl.PROTOCOL_TLSv1, do_handshake_on_connect=False, ciphers='AECDH-AES256-SHA')


class HTTPSClient(HTTPClient, TLSHandshake):
@@ -6,7 +6,8 @@ import random
import os

from configparser import BMConfigParser
from shared import codePath, shutdown
import paths
from shared import shutdown
from debug import logger

libAvailable = True

@@ -40,7 +41,7 @@ def initCL():
if (len(enabledGpus) > 0):
ctx = cl.Context(devices=enabledGpus)
queue = cl.CommandQueue(ctx)
f = open(os.path.join(codePath(), "bitmsghash", 'bitmsghash.cl'), 'r')
f = open(os.path.join(paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r')
fstr = ''.join(f.readlines())
program = cl.Program(ctx, fstr).build(options="")
logger.info("Loaded OpenCL kernel")
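initCL builds the OpenCL proof-of-work program from the bitmsghash.cl source shipped next to the code, which is why the lookup now goes through paths.codePath(). A rough sketch of that build step, assuming pyopencl is installed and that code_path and devices stand in for paths.codePath() and the user-enabled GPU list:

import os
import pyopencl as cl  # assumption: pyopencl is available

def build_pow_program(code_path, devices):
    # Create a context and queue on the chosen devices, then compile the kernel source.
    ctx = cl.Context(devices=devices)
    queue = cl.CommandQueue(ctx)
    with open(os.path.join(code_path, "bitmsghash", "bitmsghash.cl")) as f:
        source = f.read()
    program = cl.Program(ctx, source).build(options="")
    return ctx, queue, program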
70
src/paths.py
Normal file
@@ -0,0 +1,70 @@
from os import environ, path
import sys

# When using py2exe or py2app, the variable frozen is added to the sys
# namespace. This can be used to setup a different code path for
# binary distributions vs source distributions.
frozen = getattr(sys,'frozen', None)

def lookupExeFolder():
if frozen:
if frozen == "macosx_app":
# targetdir/Bitmessage.app/Contents/MacOS/Bitmessage
exeFolder = path.dirname(path.dirname(path.dirname(path.dirname(sys.executable)))) + path.sep
else:
exeFolder = path.dirname(sys.executable) + path.sep
elif __file__:
exeFolder = path.dirname(__file__) + path.sep
else:
exeFolder = ''
return exeFolder

def lookupAppdataFolder():
APPNAME = "PyBitmessage"
if "BITMESSAGE_HOME" in environ:
dataFolder = environ["BITMESSAGE_HOME"]
if dataFolder[-1] not in [os.path.sep, os.path.altsep]:
dataFolder += os.path.sep
elif sys.platform == 'darwin':
if "HOME" in environ:
dataFolder = path.join(os.environ["HOME"], "Library/Application Support/", APPNAME) + '/'
else:
stringToLog = 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
if 'logger' in globals():
logger.critical(stringToLog)
else:
print stringToLog
sys.exit()

elif 'win32' in sys.platform or 'win64' in sys.platform:
dataFolder = path.join(environ['APPDATA'].decode(sys.getfilesystemencoding(), 'ignore'), APPNAME) + path.sep
else:
from shutil import move
try:
dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
except KeyError:
dataFolder = path.join(environ["HOME"], ".config", APPNAME)

# Migrate existing data to the proper location if this is an existing install
try:
move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
stringToLog = "Moving data folder to %s" % (dataFolder)
if 'logger' in globals():
logger.info(stringToLog)
else:
print stringToLog
except IOError:
# Old directory may not exist.
pass
dataFolder = dataFolder + '/'
return dataFolder

def codePath():
if frozen == "macosx_app":
codePath = environ.get("RESOURCEPATH")
elif frozen: # windows
codePath = sys._MEIPASS
else:
codePath = path.dirname(__file__)
return codePath
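The new paths module centralises the three lookups that previously lived in shared: the executable's folder, the per-user data folder, and the code/resource path, each aware of frozen (py2exe/py2app) builds. Typical call sites after the refactoring look like the following short sketch (file names are illustrative, taken from other hunks in this commit):

import os
import paths  # the module added above

keys_file = paths.lookupAppdataFolder() + 'keys.dat'                      # per-user config
portable_keys = paths.lookupExeFolder() + 'keys.dat'                      # portable-mode config
kernel_file = os.path.join(paths.codePath(), 'bitmsghash', 'bitmsghash.cl')  # bundled resource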
@@ -8,6 +8,7 @@ import sys
import time
from configparser import BMConfigParser
from debug import logger
import paths
import shared
import openclpow
import tr

@@ -169,15 +170,15 @@ def buildCPoW():

if bmpow is not None:
return
if shared.frozen is not None:
if paths.frozen is not None:
notifyBuild(False)
return
if sys.platform in ["win32", "win64"]:
notifyBuild(False)
return
try:
call(["make", "-C", os.path.join(shared.codePath(), "bitmsghash")])
if os.path.exists(os.path.join(shared.codePath(), "bitmsghash", "bitmsghash.so")):
call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash")])
if os.path.exists(os.path.join(paths.codePath(), "bitmsghash", "bitmsghash.so")):
init()
notifyBuild(True)
else:

@@ -208,7 +209,7 @@ def run(target, initialHash):
raise
except:
pass # fallback
if shared.frozen == "macosx_app" or not shared.frozen:
if paths.frozen == "macosx_app" or not paths.frozen:
# on my (Peter Surda) Windows 10, Windows Defender
# does not like this and fights with PyBitmessage
# over CPU, resulting in very slow PoW

@@ -238,7 +239,7 @@ def init():
bitmsglib = 'bitmsghash64.dll'
try:
# MSVS
bso = ctypes.WinDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
bso = ctypes.WinDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib)
bmpow = bso.BitmessagePOW
bmpow.restype = ctypes.c_ulonglong

@@ -248,7 +249,7 @@ def init():
logger.error("C PoW test fail.", exc_info=True)
try:
# MinGW
bso = ctypes.CDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib)
bmpow = bso.BitmessagePOW
bmpow.restype = ctypes.c_ulonglong

@@ -259,7 +260,7 @@ def init():
bso = None
else:
try:
bso = ctypes.CDLL(os.path.join(shared.codePath(), "bitmsghash", bitmsglib))
bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
logger.info("Loaded C PoW DLL %s", bitmsglib)
except:
bso = None
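init() loads the compiled proof-of-work library with ctypes: on Windows it tries WinDLL (stdcall, MSVC build) and then CDLL (cdecl, MinGW build), while elsewhere it loads bitmsghash.so with CDLL, and in every case the BitmessagePOW return type must be declared as an unsigned 64-bit integer. A simplified, non-Windows sketch of that loading step, with code_path standing in for paths.codePath():

import ctypes
import os

def load_pow_library(code_path, libname="bitmsghash.so"):
    # Returns the BitmessagePOW callable, or None when the library is missing.
    try:
        bso = ctypes.CDLL(os.path.join(code_path, "bitmsghash", libname))
    except OSError:
        return None
    bmpow = bso.BitmessagePOW
    bmpow.restype = ctypes.c_ulonglong  # the winning nonce comes back as a 64-bit value
    return bmpow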
@@ -1,4 +1,5 @@
import base64
from binascii import hexlify
import hashlib
import random
import socket

@@ -7,9 +8,13 @@ from struct import pack, unpack, Struct
import sys
import time

from addresses import encodeVarint, decodeVarint
from addresses import calculateInventoryHash, encodeVarint, decodeVarint, decodeAddress, varintDecodeError
from configparser import BMConfigParser
from state import neededPubkeys, extPort, socksIP
from debug import logger
from helper_sql import sqlExecute
import highlevelcrypto
from inventory import Inventory
import state
from version import softwareVersion

#Service flags

@@ -22,6 +27,10 @@ BITFIELD_DOESACK = 1
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
'>Q', random.randrange(1, 18446744073709551615))

#If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.

#Compiled struct for packing/unpacking headers
#New code should use CreatePacket instead of Header.pack
Header = Struct('!L12sL4s')

@@ -79,11 +88,26 @@ def sslProtocolVersion():

def checkSocksIP(host):
try:
if socksIP is None or not socksIP:
socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
if state.socksIP is None or not state.socksIP:
state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
except NameError:
socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
return socksIP == host
state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
return state.socksIP == host

def isProofOfWorkSufficient(data,
nonceTrialsPerByte=0,
payloadLengthExtraBytes=0):
if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
endOfLifeTime, = unpack('>Q', data[8:16])
TTL = endOfLifeTime - int(time.time())
if TTL < 300:
TTL = 300
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))
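The check above interprets the first 8 bytes of SHA512(SHA512(nonce + SHA512(rest-of-object))) as a 64-bit integer and accepts the object when it is at or below a target of 2**64 divided by a length- and TTL-scaled cost. A worked sketch of just the target computation, using the same integer arithmetic as the Python 2 code above (the function and parameter names are illustrative):

def pow_target(object_length, ttl,
               nonce_trials_per_byte=1000, payload_length_extra_bytes=1000):
    # object_length corresponds to len(data), i.e. including the 8-byte nonce.
    effective_length = object_length + payload_length_extra_bytes
    denominator = nonce_trials_per_byte * (
        effective_length + (ttl * effective_length) // (2 ** 16))
    return (2 ** 64) // denominator

# Example: a 1000-byte object with a 28-day TTL
print(pow_target(1000, 28 * 24 * 60 * 60))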
# Packet creation

@@ -117,10 +141,10 @@ def assembleVersionMessage(remoteHost, remotePort, myStreamNumber, server = Fals
# we have a separate extPort and
# incoming over clearnet or
# outgoing through clearnet
if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and extPort \
if BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp') and state.extPort \
and ((server and not checkSocksIP(remoteHost)) or \
(BMConfigParser().get("bitmessagesettings", "socksproxytype") == "none" and not server)):
payload += pack('>H', extPort)
payload += pack('>H', state.extPort)
elif checkSocksIP(remoteHost) and server: # incoming connection over Tor
payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
else: # no extPort and not incoming over Tor

@@ -178,7 +202,7 @@ def decryptAndCheckPubkeyPayload(data, address):
encryptedData = data[readPosition:]

# Let us try to decrypt the pubkey
toAddress, cryptorObject = neededPubkeys[tag]
toAddress, cryptorObject = state.neededPubkeys[tag]
if toAddress != address:
logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
# the only way I can think that this could happen is if someone encodes their address data two different ways.

@@ -308,7 +332,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
readPosition += objectVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 9])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return

@@ -331,7 +355,7 @@ def _checkAndShareMsgWithPeers(data):
readPosition += objectVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 9])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
readPosition += streamNumberLength

@@ -362,7 +386,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
readPosition += addressVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 10])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
readPosition += streamNumberLength

@@ -393,7 +417,7 @@ def _checkAndSharePubkeyWithPeers(data):
streamNumber, varintLength = decodeVarint(
data[readPosition:readPosition + 10])
readPosition += varintLength
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
if addressVersion >= 4:

@@ -430,7 +454,7 @@ def _checkAndShareBroadcastWithPeers(data):
if broadcastVersion >= 2:
streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
readPosition += streamNumberLength
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
if broadcastVersion >= 3:

@@ -452,3 +476,10 @@ def _checkAndShareBroadcastWithPeers(data):
# Now let's queue it to be processed ourselves.
objectProcessorQueue.put((objectType,data))

# If you want to command all of the sendDataThreads to do something, like shutdown or send some data, this
# function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are
# responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
# logger.debug('running broadcastToSendDataQueues')
for q in state.sendDataQueues:
q.put(data)
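broadcastToSendDataQueues now lives in protocol and iterates over state.sendDataQueues, into which each sendDataThread registers its own queue; callers pass a (stream, command, data) tuple. A small, self-contained usage sketch mirroring the calls made elsewhere in this commit:

import Queue

import protocol
import state

# Register a queue the way a sendDataThread would, then broadcast a command to
# every registered queue; the shutdown tuple below is the one doCleanShutdown sends.
q = Queue.Queue()
state.sendDataQueues.append(q)
protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data'))
print(q.get())  # -> (0, 'shutdown', 'no data')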
138
src/shared.py
@@ -8,14 +8,11 @@ useVeryEasyProofOfWorkForTesting = False # If you set this to True while on the

# Libraries.
import base64
import collections
import os
import pickle
import Queue
import random
from multiprocessing import active_children, Queue as mpQueue, Lock as mpLock
import socket
import sys
import stat
import threading

@@ -36,6 +33,8 @@ import shared
from helper_sql import *
from helper_threading import *
from inventory import Inventory
import protocol
import state

myECCryptorObjects = {}

@@ -52,9 +51,7 @@ parserLock = mpLock()
addressGeneratorQueue = Queue.Queue()
knownNodesLock = threading.Lock()
knownNodes = {}
sendDataQueues = [] #each sendData thread puts its queue in this list.
printLock = threading.Lock()
appdata = '' #holds the location of the application data storage directory
statusIconColor = 'red'
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender threads won't connect to the same remote node twice.
shutdown = 0 #Set to 1 by the doCleanShutdown function. Used to tell the proof of work worker threads to exit.

@@ -86,27 +83,17 @@ daemon = False
needToWriteKnownNodesToDisk = False # If True, the singleCleaner will write it to disk eventually.
maximumLengthOfTimeToBotherResendingMessages = 0
objectProcessorQueue = ObjectProcessorQueue() # receiveDataThreads dump objects they hear on the network into this queue to be processed.
streamsInWhichIAmParticipating = {}
timeOffsetWrongCount = 0

# sanity check, prevent doing ridiculous PoW
# 20 million PoWs equals approximately 2 days on dev's dual R9 290
ridiculousDifficulty = 20000000

#If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.

# Remember here the RPC port read from namecoin.conf so we can restore to
# it as default whenever the user changes the "method" selection for
# namecoin integration to "namecoind".
namecoinDefaultRpcPort = "8336"

# When using py2exe or py2app, the variable frozen is added to the sys
# namespace. This can be used to setup a different code path for
# binary distributions vs source distributions.
frozen = getattr(sys,'frozen', None)

# If the trustedpeer option is specified in keys.dat then this will
# contain a Peer which will be connected to instead of using the
# addresses advertised by other peers. The client will only connect to
@@ -119,68 +106,6 @@ frozen = getattr(sys,'frozen', None)
# security.
trustedPeer = None

def lookupExeFolder():
if frozen:
if frozen == "macosx_app":
# targetdir/Bitmessage.app/Contents/MacOS/Bitmessage
exeFolder = path.dirname(path.dirname(path.dirname(path.dirname(sys.executable)))) + path.sep
else:
exeFolder = path.dirname(sys.executable) + path.sep
elif __file__:
exeFolder = path.dirname(__file__) + path.sep
else:
exeFolder = ''
return exeFolder

def lookupAppdataFolder():
APPNAME = "PyBitmessage"
if "BITMESSAGE_HOME" in environ:
dataFolder = environ["BITMESSAGE_HOME"]
if dataFolder[-1] not in [os.path.sep, os.path.altsep]:
dataFolder += os.path.sep
elif sys.platform == 'darwin':
if "HOME" in environ:
dataFolder = path.join(os.environ["HOME"], "Library/Application Support/", APPNAME) + '/'
else:
stringToLog = 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
if 'logger' in globals():
logger.critical(stringToLog)
else:
print stringToLog
sys.exit()

elif 'win32' in sys.platform or 'win64' in sys.platform:
dataFolder = path.join(environ['APPDATA'].decode(sys.getfilesystemencoding(), 'ignore'), APPNAME) + path.sep
else:
from shutil import move
try:
dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
except KeyError:
dataFolder = path.join(environ["HOME"], ".config", APPNAME)

# Migrate existing data to the proper location if this is an existing install
try:
move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
stringToLog = "Moving data folder to %s" % (dataFolder)
if 'logger' in globals():
logger.info(stringToLog)
else:
print stringToLog
except IOError:
# Old directory may not exist.
pass
dataFolder = dataFolder + '/'
return dataFolder

def codePath():
if frozen == "macosx_app":
codePath = os.environ.get("RESOURCEPATH")
elif frozen: # windows
codePath = sys._MEIPASS
else:
codePath = os.path.dirname(__file__)
return codePath

def isAddressInMyAddressBook(address):
queryreturn = sqlQuery(
'''select address from addressbook where address=?''',
@@ -236,7 +161,7 @@ def reloadMyAddressHashes():
myAddressesByTag.clear()
#myPrivateKeys.clear()

keyfileSecure = checkSensitiveFilePermissions(appdata + 'keys.dat')
keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat')
configSections = BMConfigParser().sections()
hasEnabledKeys = False
for addressInKeysFile in configSections:

@@ -262,7 +187,7 @@ def reloadMyAddressHashes():
logger.error('Error in reloadMyAddressHashes: Can\'t handle address versions other than 2, 3, or 4.\n')

if not keyfileSecure:
fixSensitiveFilePermissions(appdata + 'keys.dat', hasEnabledKeys)
fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys)

def reloadBroadcastSendersForWhichImWatching():
broadcastSendersForWhichImWatching.clear()

@@ -286,21 +211,6 @@ def reloadBroadcastSendersForWhichImWatching():
privEncryptionKey = doubleHashOfAddressData[:32]
MyECSubscriptionCryptorObjects[tag] = highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))

def isProofOfWorkSufficient(data,
nonceTrialsPerByte=0,
payloadLengthExtraBytes=0):
if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
endOfLifeTime, = unpack('>Q', data[8:16])
TTL = endOfLifeTime - int(time.time())
if TTL < 300:
TTL = 300
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))

def doCleanShutdown():
global shutdown, thisapp
shutdown = 1 #Used to tell proof of work worker threads and the objectProcessorThread to exit.
@@ -308,7 +218,7 @@ def doCleanShutdown():
parserInputQueue.put(None, False)
except Queue.Full:
pass
broadcastToSendDataQueues((0, 'shutdown', 'no data'))
protocol.broadcastToSendDataQueues((0, 'shutdown', 'no data'))
objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
for thread in threading.enumerate():
if thread.isAlive() and isinstance(thread, StoppableThread):

@@ -316,7 +226,7 @@ def doCleanShutdown():

knownNodesLock.acquire()
UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
output = open(appdata + 'knownnodes.dat', 'wb')
output = open(state.appdata + 'knownnodes.dat', 'wb')
logger.info('finished opening knownnodes.dat. Now pickle.dump')
pickle.dump(knownNodes, output)
logger.info('Completed pickle.dump. Closing output...')

@@ -359,14 +269,6 @@ def doCleanShutdown():
else:
logger.info('Core shutdown complete.')

# If you want to command all of the sendDataThreads to do something, like shutdown or send some data, this
# function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are
# responsible for putting their queue into (and out of) the sendDataQueues list.
def broadcastToSendDataQueues(data):
# logger.debug('running broadcastToSendDataQueues')
for q in sendDataQueues:
q.put(data)

def fixPotentiallyInvalidUTF8Data(text):
try:
unicode(text,'utf-8')
@@ -554,7 +456,7 @@ def checkAndShareObjectWithPeers(data):
logger.info('The payload length of this object is too large (%s bytes). Ignoring it.' % len(data))
return 0
# Let us check to make sure that the proof of work is sufficient.
if not isProofOfWorkSufficient(data):
if not protocol.isProofOfWorkSufficient(data):
logger.info('Proof of work is insufficient.')
return 0

@@ -597,7 +499,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
readPosition += objectVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 9])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return

@@ -609,7 +511,7 @@ def _checkAndShareUndefinedObjectWithPeers(data):
Inventory()[inventoryHash] = (
objectType, streamNumber, data, embeddedTime,'')
logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))


def _checkAndShareMsgWithPeers(data):

@@ -620,7 +522,7 @@ def _checkAndShareMsgWithPeers(data):
readPosition += objectVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 9])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
readPosition += streamNumberLength

@@ -633,7 +535,7 @@ def _checkAndShareMsgWithPeers(data):
Inventory()[inventoryHash] = (
objectType, streamNumber, data, embeddedTime,'')
logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

# Now let's enqueue it to be processed ourselves.
objectProcessorQueue.put((objectType,data))

@@ -651,7 +553,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
readPosition += addressVersionLength
streamNumber, streamNumberLength = decodeVarint(
data[readPosition:readPosition + 10])
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
readPosition += streamNumberLength

@@ -666,7 +568,7 @@ def _checkAndShareGetpubkeyWithPeers(data):
objectType, streamNumber, data, embeddedTime,'')
# This getpubkey request is valid. Forward to peers.
logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

# Now let's queue it to be processed ourselves.
objectProcessorQueue.put((objectType,data))

@@ -682,7 +584,7 @@ def _checkAndSharePubkeyWithPeers(data):
streamNumber, varintLength = decodeVarint(
data[readPosition:readPosition + 10])
readPosition += varintLength
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
if addressVersion >= 4:

@@ -700,7 +602,7 @@ def _checkAndSharePubkeyWithPeers(data):
objectType, streamNumber, data, embeddedTime, tag)
# This object is valid. Forward it to peers.
logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))


# Now let's queue it to be processed ourselves.

@@ -719,7 +621,7 @@ def _checkAndShareBroadcastWithPeers(data):
if broadcastVersion >= 2:
streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
readPosition += streamNumberLength
if not streamNumber in streamsInWhichIAmParticipating:
if not streamNumber in state.streamsInWhichIAmParticipating:
logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
return
if broadcastVersion >= 3:

@@ -736,19 +638,19 @@ def _checkAndShareBroadcastWithPeers(data):
objectType, streamNumber, data, embeddedTime, tag)
# This object is valid. Forward it to peers.
logger.debug('advertising inv with hash: %s' % hexlify(inventoryHash))
broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
protocol.broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

# Now let's queue it to be processed ourselves.
objectProcessorQueue.put((objectType,data))

def openKeysFile():
if 'linux' in sys.platform:
subprocess.call(["xdg-open", shared.appdata + 'keys.dat'])
subprocess.call(["xdg-open", state.appdata + 'keys.dat'])
else:
os.startfile(shared.appdata + 'keys.dat')
os.startfile(state.appdata + 'keys.dat')

def writeKeysFile():
fileName = shared.appdata + 'keys.dat'
fileName = state.appdata + 'keys.dat'
fileNameBak = fileName + "." + datetime.datetime.now().strftime("%Y%j%H%M%S%f") + '.bak'
# create a backup copy to prevent the accidental loss due to the disk write failure
try:
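writeKeysFile guards against a failed disk write by first copying keys.dat to a timestamped .bak file and only then rewriting the original. A compact sketch of that backup-then-write pattern, with an illustrative function name rather than the commit's exact code:

import datetime
import shutil

def write_config_safely(config, file_name):
    # Back up first, then rewrite; the timestamp format mirrors the
    # "%Y%j%H%M%S%f" suffix used above.
    backup_name = file_name + "." + datetime.datetime.now().strftime("%Y%j%H%M%S%f") + '.bak'
    try:
        shutil.copyfile(file_name, backup_name)
    except IOError:
        pass  # first run: nothing to back up yet
    with open(file_name, 'wb') as configfile:
        config.write(configfile)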
@@ -6,6 +6,7 @@ from multiprocessing import Process
import os
import sys
import shared
import state

try:
import fcntl # @UnresolvedImport

@@ -24,7 +25,7 @@ class singleinstance:
self.counter = 0
self.daemon = daemon
self.lockPid = None
self.lockfile = os.path.normpath(os.path.join(shared.appdata, 'singleton%s.lock' % flavor_id))
self.lockfile = os.path.normpath(os.path.join(state.appdata, 'singleton%s.lock' % flavor_id))

if not self.daemon and not shared.curses:
# Tells the already running (if any) application to get focus.
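The single-instance lock file now resolves against state.appdata. On POSIX the usual mechanism behind such a class is an exclusive, non-blocking fcntl lock on that file; the following is only a sketch of that idea under those assumptions, not the class above:

import fcntl
import os

def acquire_single_instance_lock(appdata, flavor_id=''):
    # Hold an exclusive advisory lock on the lock file in the data directory;
    # a second instance gets IOError from lockf and can bail out.
    lockfile = os.path.normpath(os.path.join(appdata, 'singleton%s.lock' % flavor_id))
    fp = open(lockfile, 'w')
    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    fp.write(str(os.getpid()))
    fp.flush()
    return fp  # keep the file object alive for the process lifetime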
@@ -1,7 +1,14 @@
neededPubkeys = {}
streamsInWhichIAmParticipating = {}
sendDataQueues = [] #each sendData thread puts its queue in this list.

# For UPnP
extPort = None

# for Tor hidden service
socksIP = None

# Network protocols last check failed
networkProtocolLastFailed = {'IPv4': 0, 'IPv6': 0, 'onion': 0}

appdata = '' #holds the location of the application data storage directory
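Per the commit message, networkProtocolLastFailed records when each transport last failed (for example an IPv6 "network unreachable") so that future connection attempts over that protocol can be skipped for a while. A sketch of how such a cooldown check could be wired up; the helper functions and the cooldown value are assumptions, not part of this commit:

import time

import state

PROTOCOL_RETRY_COOLDOWN = 3600  # seconds; an assumed value, not from the commit

def protocol_usable(name):
    # Skip a transport (e.g. 'IPv6') for a while after its last recorded failure.
    return time.time() - state.networkProtocolLastFailed.get(name, 0) > PROTOCOL_RETRY_COOLDOWN

def record_protocol_failure(name):
    # Called when e.g. an IPv6 connect raises "network unreachable".
    state.networkProtocolLastFailed[name] = time.time()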