commit 12b4015457

@@ -68,23 +68,48 @@ def encodeVarint(integer):
     if integer >= 18446744073709551616:
         print 'varint cannot be >= 18446744073709551616'
         raise SystemExit
 
+class varintDecodeError(Exception):
+    pass
+
 def decodeVarint(data):
+    """
+    Decodes an encoded varint to an integer and returns it.
+    Per protocol v3, the encoded value must be encoded with
+    the minimum amount of data possible or else it is malformed.
+    Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes)
+    """
     if len(data) == 0:
         return (0,0)
     firstByte, = unpack('>B',data[0:1])
     if firstByte < 253:
+        # encodes 0 to 252
        return (firstByte,1) #the 1 is the length of the varint
     if firstByte == 253:
-        a, = unpack('>H',data[1:3])
-        return (a,3)
+        # encodes 253 to 65535
+        if len(data) < 3:
+            raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 3.' % (firstByte, len(data)))
+        encodedValue, = unpack('>H',data[1:3])
+        if encodedValue < 253:
+            raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
+        return (encodedValue,3)
     if firstByte == 254:
-        a, = unpack('>I',data[1:5])
-        return (a,5)
+        # encodes 65536 to 4294967295
+        if len(data) < 5:
+            raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 5.' % (firstByte, len(data)))
+        encodedValue, = unpack('>I',data[1:5])
+        if encodedValue < 65536:
+            raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
+        return (encodedValue,5)
     if firstByte == 255:
-        a, = unpack('>Q',data[1:9])
-        return (a,9)
+        # encodes 4294967296 to 18446744073709551615
+        if len(data) < 9:
+            raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 9.' % (firstByte, len(data)))
+        encodedValue, = unpack('>Q',data[1:9])
+        if encodedValue < 4294967296:
+            raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
+        return (encodedValue,9)
 
 
 def calculateInventoryHash(data):
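The hunk above (apparently PyBitmessage's src/addresses.py; the diff itself does not name the file) makes decodeVarint reject truncated varints and varints that are not encoded in the fewest possible bytes, raising the new varintDecodeError instead of silently returning a value. Below is a minimal, self-contained sketch of the same wire format and the minimal-encoding rule, written for Python 2.7 to match the codebase; the names encode_varint, decode_varint and VarintError are illustrative, not the project's identifiers.

    # Sketch of the Bitmessage varint rules enforced above (Python 2.7).
    from struct import pack, unpack

    class VarintError(Exception):
        pass

    def encode_varint(n):
        if n < 0xfd:
            return pack('>B', n)                      # 0 .. 252 in one byte
        if n <= 0xffff:
            return pack('>B', 0xfd) + pack('>H', n)   # 253 .. 65535 in three bytes
        if n <= 0xffffffff:
            return pack('>B', 0xfe) + pack('>I', n)   # up to 4294967295 in five bytes
        return pack('>B', 0xff) + pack('>Q', n)       # up to 2**64 - 1 in nine bytes

    def decode_varint(data):
        first, = unpack('>B', data[0:1])
        if first < 0xfd:
            return first, 1
        fmt, size, floor = {0xfd: ('>H', 3, 253),
                            0xfe: ('>I', 5, 65536),
                            0xff: ('>Q', 9, 4294967296)}[first]
        if len(data) < size:
            raise VarintError('truncated varint')
        value, = unpack(fmt, data[1:size])
        if value < floor:                 # the minimal-encoding rule added above
            raise VarintError('varint not encoded with the fewest possible bytes')
        return value, size

    assert decode_varint(encode_varint(70000)) == (70000, 5)
    # '\xfd\x00\x01' is a syntactically valid three-byte varint, but it encodes
    # the value 1, so the patched decoder treats it as malformed.
    try:
        decode_varint('\xfd\x00\x01')
    except VarintError:
        pass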
@@ -107,23 +132,17 @@ def encodeAddress(version,stream,ripe):
             raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
         ripe = ripe.lstrip('\x00')
 
-    a = encodeVarint(version) + encodeVarint(stream) + ripe
+    storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe
 
+    # Generate the checksum
     sha = hashlib.new('sha512')
-    sha.update(a)
+    sha.update(storedBinaryData)
     currentHash = sha.digest()
-    #print 'sha after first hashing: ', sha.hexdigest()
     sha = hashlib.new('sha512')
     sha.update(currentHash)
-    #print 'sha after second hashing: ', sha.hexdigest()
 
     checksum = sha.digest()[0:4]
-    #print 'len(a) = ', len(a)
-    #print 'checksum = ', checksum.encode('hex')
-    #print 'len(checksum) = ', len(checksum)
 
-    asInt = int(a.encode('hex') + checksum.encode('hex'),16)
-    #asInt = int(checksum.encode('hex') + a.encode('hex'),16)
-    # print asInt
+    asInt = int(storedBinaryData.encode('hex') + checksum.encode('hex'),16)
     return 'BM-'+ encodeBase58(asInt)
 
 def decodeAddress(address):
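The hunk above renames the working buffer to storedBinaryData and drops the old debugging prints; the checksum logic itself is unchanged: the address body is varint(version) + varint(stream) + ripe with leading NULL bytes stripped, and the checksum is the first four bytes of a double SHA-512 over that body. A hedged sketch of just the checksum step follows (Python 2.7); the sample version, stream and RIPE values are made up, and the single-byte varint helper only covers values below 253.

    import hashlib
    from struct import pack

    def encode_varint_small(n):
        # enough for the version and stream values used below (both < 253)
        assert 0 <= n < 253
        return pack('>B', n)

    def address_checksum(version, stream, ripe):
        stored_binary_data = encode_varint_small(version) + encode_varint_small(stream) + ripe.lstrip('\x00')
        current_hash = hashlib.sha512(stored_binary_data).digest()
        return hashlib.sha512(current_hash).digest()[0:4]   # first four bytes only

    sample_ripe = '\x00' + '\x11' * 19      # made-up 20-byte RIPE value, leading zero gets stripped
    assert len(address_checksum(4, 1, sample_ripe)) == 4

The full address is then 'BM-' plus the base58 encoding of body + checksum, which needs the project's encodeBase58 and is left out of the sketch.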
@@ -163,7 +182,12 @@ def decodeAddress(address):
     #else:
     #    print 'checksum PASSED'
 
-    addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9])
+    try:
+        addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9])
+    except varintDecodeError as e:
+        print e
+        status = 'varintmalformed'
+        return status,0,0,""
     #print 'addressVersionNumber', addressVersionNumber
     #print 'bytesUsedByVersionNumber', bytesUsedByVersionNumber
 

@@ -176,32 +200,42 @@ def decodeAddress(address):
         status = 'versiontoohigh'
         return status,0,0,""
 
-    streamNumber, bytesUsedByStreamNumber = decodeVarint(data[bytesUsedByVersionNumber:])
+    try:
+        streamNumber, bytesUsedByStreamNumber = decodeVarint(data[bytesUsedByVersionNumber:])
+    except varintDecodeError as e:
+        print e
+        status = 'varintmalformed'
+        return status,0,0,""
     #print streamNumber
     status = 'success'
     if addressVersionNumber == 1:
         return status,addressVersionNumber,streamNumber,data[-24:-4]
     elif addressVersionNumber == 2 or addressVersionNumber == 3:
-        if len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) == 19:
-            return status,addressVersionNumber,streamNumber,'\x00'+data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
-        elif len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) == 20:
-            return status,addressVersionNumber,streamNumber,data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
-        elif len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) == 18:
-            return status,addressVersionNumber,streamNumber,'\x00\x00'+data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
-        elif len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) < 18:
+        embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
+        if len(embeddedRipeData) == 19:
+            return status,addressVersionNumber,streamNumber,'\x00'+embeddedRipeData
+        elif len(embeddedRipeData) == 20:
+            return status,addressVersionNumber,streamNumber,embeddedRipeData
+        elif len(embeddedRipeData) == 18:
+            return status,addressVersionNumber,streamNumber,'\x00\x00'+embeddedRipeData
+        elif len(embeddedRipeData) < 18:
             return 'ripetooshort',0,0,""
-        elif len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) > 20:
+        elif len(embeddedRipeData) > 20:
             return 'ripetoolong',0,0,""
         else:
             return 'otherproblem',0,0,""
     elif addressVersionNumber == 4:
-        if len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) > 20:
+        embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
+        if embeddedRipeData[0:1] == '\x00':
+            # In order to enforce address non-malleability, encoded RIPE data must have NULL bytes removed from the front
+            return 'encodingproblem',0,0,""
+        elif len(embeddedRipeData) > 20:
             return 'ripetoolong',0,0,""
-        elif len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]) < 4:
+        elif len(embeddedRipeData) < 4:
             return 'ripetooshort',0,0,""
         else:
-            x00string = '\x00' * (20 - len(data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]))
-            return status,addressVersionNumber,streamNumber,x00string+data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
+            x00string = '\x00' * (20 - len(embeddedRipeData))
+            return status,addressVersionNumber,streamNumber,x00string+embeddedRipeData
 
 def addBMIfNotPresent(address):
     address = str(address).strip()
 
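With the two hunks above, decodeAddress reports the new 'varintmalformed' status instead of letting a malformed varint propagate as an exception, and the repeated slice expression is factored into embeddedRipeData. An illustrative caller follows (Python 2.7, run from the project's src/ directory so that addresses.py is importable); the placeholder address and the printed messages are mine, while the statuses and the four-element return tuple come from the code above.

    from addresses import decodeAddress

    someAddress = 'BM-placeholder'   # substitute a real Bitmessage address here

    status, addressVersionNumber, streamNumber, ripe = decodeAddress(someAddress)
    if status == 'success':
        print 'version %s, stream %s, %s-byte ripe' % (addressVersionNumber, streamNumber, len(ripe))
    elif status == 'varintmalformed':
        # new in this commit: truncated or non-minimally-encoded varints are
        # reported as a status instead of raising an unhandled exception
        print 'the address data is malformed'
    else:
        print 'could not decode address:', status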
src/api.py (24 changed lines)

@@ -17,7 +17,7 @@ import json
 
 import shared
 import time
-from addresses import decodeAddress,addBMIfNotPresent,decodeVarint,calculateInventoryHash
+from addresses import decodeAddress,addBMIfNotPresent,decodeVarint,calculateInventoryHash,varintDecodeError
 import helper_inbox
 import helper_sent
 import hashlib

@@ -139,6 +139,8 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
            raise APIError(9, 'Invalid characters in address: ' + address)
        if status == 'versiontoohigh':
            raise APIError(10, 'Address version number too high (or zero) in address: ' + address)
+        if status == 'varintmalformed':
+            raise APIError(26, 'Malformed varint in address: ' + address)
        raise APIError(7, 'Could not decode address: ' + address + ' : ' + status)
    if addressVersionNumber < 2 or addressVersionNumber > 4:
        raise APIError(11, 'The address version number currently must be 2, 3 or 4. Others aren\'t supported. Check the address.')

@@ -621,6 +623,8 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
            raise APIError(6, 'The encoding type must be 2 because that is the only one this program currently supports.')
        subject = self._decode(subject, "base64")
        message = self._decode(message, "base64")
+        if len(subject + message) > (2 ** 18 - 500):
+            raise APIError(27, 'Message is too long.')
        toAddress = addBMIfNotPresent(toAddress)
        fromAddress = addBMIfNotPresent(fromAddress)
        status, addressVersionNumber, streamNumber, toRipe = self._verifyAddress(toAddress)

@@ -664,7 +668,8 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
            raise APIError(6, 'The encoding type must be 2 because that is the only one this program currently supports.')
        subject = self._decode(subject, "base64")
        message = self._decode(message, "base64")
+        if len(subject + message) > (2 ** 18 - 500):
+            raise APIError(27, 'Message is too long.')
        fromAddress = addBMIfNotPresent(fromAddress)
        self._verifyAddress(fromAddress)
        try:

@@ -777,9 +782,10 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
        encryptedPayload = pack('>Q', nonce) + encryptedPayload
        toStreamNumber = decodeVarint(encryptedPayload[16:26])[0]
        inventoryHash = calculateInventoryHash(encryptedPayload)
-        objectType = 'msg'
+        objectType = 2
+        TTL = 2.5 * 24 * 60 * 60
        shared.inventory[inventoryHash] = (
-            objectType, toStreamNumber, encryptedPayload, int(time.time()),'')
+            objectType, toStreamNumber, encryptedPayload, int(time.time()) + TTL,'')
        shared.inventorySets[toStreamNumber].add(inventoryHash)
        with shared.printLock:
            print 'Broadcasting inv for msg(API disseminatePreEncryptedMsg command):', inventoryHash.encode('hex')

@@ -814,10 +820,11 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
        pubkeyReadPosition += addressVersionLength
        pubkeyStreamNumber = decodeVarint(payload[pubkeyReadPosition:pubkeyReadPosition+10])[0]
        inventoryHash = calculateInventoryHash(payload)
-        objectType = 'pubkey'
+        objectType = 1
        #todo: support v4 pubkeys
+        TTL = 28 * 24 * 60 * 60
        shared.inventory[inventoryHash] = (
-            objectType, pubkeyStreamNumber, payload, int(time.time()),'')
+            objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL,'')
        shared.inventorySets[pubkeyStreamNumber].add(inventoryHash)
        with shared.printLock:
            print 'broadcasting inv within API command disseminatePubkey with hash:', inventoryHash.encode('hex')

@@ -839,7 +846,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
        # use it we'll need to fill out a field in our inventory database
        # which is blank by default (first20bytesofencryptedmessage).
        queryreturn = sqlQuery(
-            '''SELECT hash, payload FROM inventory WHERE tag = '' and objecttype = 'msg' ; ''')
+            '''SELECT hash, payload FROM inventory WHERE tag = '' and objecttype = 2 ; ''')
        with SqlBulkExecute() as sql:
            for row in queryreturn:
                hash01, payload = row

@@ -906,6 +913,9 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
            return self._handle_request(method, params)
        except APIError as e:
            return str(e)
+        except varintDecodeError as e:
+            logger.error(e)
+            return "API Error 0026: Data contains a malformed varint. Some details: %s" % e
        except Exception as e:
            logger.exception(e)
            return "API Error 0021: Unexpected API Failure - %s" % str(e)
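The API hunks above replace the string object types previously stored in shared.inventory ('msg', 'pubkey', 'broadcast') with numeric object types, and store int(time.time()) plus a per-type TTL rather than the bare insertion time. A small sketch of the mapping and the tuple shape follows (Python 2.7); OBJECT_GETPUBKEY = 0 is an assumption that does not appear in this diff, while the other three values and both TTLs are taken straight from the changed lines.

    import time

    OBJECT_GETPUBKEY = 0   # assumed, not shown in this commit
    OBJECT_PUBKEY = 1
    OBJECT_MSG = 2
    OBJECT_BROADCAST = 3

    TTL_MSG = 2.5 * 24 * 60 * 60      # 2.5 days for msg objects, per the diff
    TTL_PUBKEY = 28 * 24 * 60 * 60    # 28 days for pubkey objects, per the diff

    def inventory_entry(object_type, stream_number, payload, ttl):
        # shape of the tuple written into shared.inventory in the hunks above
        return (object_type, stream_number, payload, int(time.time()) + ttl, '')

    entry = inventory_entry(OBJECT_MSG, 1, '\x00' * 20, TTL_MSG)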
@@ -742,6 +742,8 @@ def sendMessage(sender="", recv="", broadcast=None, subject="", body="", reply=F
             err += "Some data encoded in the address is too short. There might be something wrong with the software of your acquaintance."
         elif status == "ripetoolong":
             err += "Some data encoded in the address is too long. There might be something wrong with the software of your acquaintance."
+        elif status == "varintmalformed":
+            err += "Some data encoded in the address is malformed. There might be something wrong with the software of your acquaintance."
         else:
             err += "It is unknown what is wrong with the address."
         d.scrollbox(unicode(err), exit_label="Continue")
@@ -154,9 +154,9 @@ selfInitiatedConnections = {}
 
 if shared.useVeryEasyProofOfWorkForTesting:
     shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(
-        shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)
+        shared.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
     shared.networkDefaultPayloadLengthExtraBytes = int(
-        shared.networkDefaultPayloadLengthExtraBytes / 7000)
+        shared.networkDefaultPayloadLengthExtraBytes / 100)
 
 class Main:
     def start(self, daemon=False):
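This hunk only touches the developer switch shared.useVeryEasyProofOfWorkForTesting: in testing mode both network-default difficulty parameters are now divided by 100, instead of by 16 and 7000 respectively. A sketch of the effect follows; the concrete default values are not part of this diff, so they are passed in as parameters and the 1000/1000 figures in the example are purely hypothetical.

    def easy_pow_defaults(nonce_trials_per_byte, payload_length_extra_bytes,
                          use_very_easy_pow_for_testing=True):
        # only the divisor change (16 and 7000 -> 100 and 100) comes from the hunk
        if use_very_easy_pow_for_testing:
            nonce_trials_per_byte = int(nonce_trials_per_byte / 100)
            payload_length_extra_bytes = int(payload_length_extra_bytes / 100)
        return nonce_trials_per_byte, payload_length_extra_bytes

    # hypothetical defaults of 1000/1000 would both become 10 in testing mode
    print easy_pow_defaults(1000, 1000)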
@@ -1346,26 +1346,23 @@ class MyForm(QtGui.QMainWindow):
         if self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text() == "":
             QMessageBox.about(self, _translate("MainWindow", "bad passphrase"), _translate(
                 "MainWindow", "You must type your passphrase. If you don\'t have one then this is not the form for you."))
-        else:
+            return
         streamNumberForAddress = int(
             self.regenerateAddressesDialogInstance.ui.lineEditStreamNumber.text())
         try:
             addressVersionNumber = int(
                 self.regenerateAddressesDialogInstance.ui.lineEditAddressVersionNumber.text())
         except:
             QMessageBox.about(self, _translate("MainWindow", "Bad address version number"), _translate(
                 "MainWindow", "Your address version number must be a number: either 3 or 4."))
+            return
         if addressVersionNumber < 3 or addressVersionNumber > 4:
             QMessageBox.about(self, _translate("MainWindow", "Bad address version number"), _translate(
                 "MainWindow", "Your address version number must be either 3 or 4."))
-        # self.addressGenerator = addressGenerator()
-        # self.addressGenerator.setup(addressVersionNumber,streamNumberForAddress,"unused address",self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(),self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(),self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked())
-        # QtCore.QObject.connect(self.addressGenerator, SIGNAL("writeNewAddressToTable(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.writeNewAddressToTable)
-        # QtCore.QObject.connect(self.addressGenerator, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
-        # self.addressGenerator.start()
+            return
         shared.addressGeneratorQueue.put(('createDeterministicAddresses', addressVersionNumber, streamNumberForAddress, "regenerated deterministic address", self.regenerateAddressesDialogInstance.ui.spinBoxNumberOfAddressesToMake.value(
         ), self.regenerateAddressesDialogInstance.ui.lineEditPassphrase.text().toUtf8(), self.regenerateAddressesDialogInstance.ui.checkBoxEighteenByteRipe.isChecked()))
         self.ui.tabWidget.setCurrentIndex(3)
 
     def click_actionJoinChan(self):
         self.newChanDialogInstance = newChanDialog(self)
@@ -1842,6 +1839,17 @@ class MyForm(QtGui.QMainWindow):
         subject = str(self.ui.lineEditSubject.text().toUtf8())
         message = str(
             self.ui.textEditMessage.document().toPlainText().toUtf8())
+        """
+        The whole network message must fit in 2^18 bytes. Let's assume 500
+        bytes of overhead. If someone wants to get that too an exact
+        number you are welcome to but I think that it would be a better
+        use of time to support message continuation so that users can
+        send messages of any length.
+        """
+        if len(message) > (2 ** 18 - 500):
+            QMessageBox.about(self, _translate("MainWindow", "Message too long"), _translate(
+                "MainWindow", "The message that you are trying to send is too long by %1 bytes. (The maximum is 261644 bytes). Please cut it down before sending.").arg(len(message) - (2 ** 18 - 500)))
+            return
         if self.ui.radioButtonSpecific.isChecked(): # To send a message to specific people (rather than broadcast)
             toAddressesList = [s.strip()
                                for s in toAddresses.replace(',', ';').split(';')]
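The 2 ** 18 - 500 figure used here and in the API hunks works out to the 261644 bytes quoted in the dialog text: a whole network object must fit in 2^18 bytes, and the commit budgets roughly 500 bytes for everything other than the message body.

    MAX_OBJECT_SIZE = 2 ** 18            # 262144 bytes
    ASSUMED_OVERHEAD = 500
    MAX_MESSAGE_LENGTH = MAX_OBJECT_SIZE - ASSUMED_OVERHEAD
    assert MAX_MESSAGE_LENGTH == 261644  # matches the figure in the dialog text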
@@ -1873,6 +1881,9 @@ class MyForm(QtGui.QMainWindow):
             elif status == 'ripetoolong':
                 self.statusBar().showMessage(_translate(
                     "MainWindow", "Error: Some data encoded in the address %1 is too long. There might be something wrong with the software of your acquaintance.").arg(toAddress))
+            elif status == 'varintmalformed':
+                self.statusBar().showMessage(_translate(
+                    "MainWindow", "Error: Some data encoded in the address %1 is malformed. There might be something wrong with the software of your acquaintance.").arg(toAddress))
             else:
                 self.statusBar().showMessage(_translate(
                     "MainWindow", "Error: Something is wrong with the address %1.").arg(toAddress))

@@ -2211,10 +2222,10 @@ class MyForm(QtGui.QMainWindow):
             addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()
         tag = doubleHashOfAddressData[32:]
         queryreturn = sqlQuery(
-            '''select payload from inventory where objecttype='broadcast' and tag=?''', tag)
+            '''select payload from inventory where objecttype=3 and tag=?''', tag)
         for row in queryreturn:
             payload, = row
-            objectType = 'broadcast'
+            objectType = 3
             with shared.objectProcessorQueueSizeLock:
                 shared.objectProcessorQueueSize += len(payload)
                 shared.objectProcessorQueue.put((objectType,payload))
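The context lines above are the tail of a double SHA-512 over varint(addressVersion) + varint(streamNumber) + ripe; the broadcast tag is the last 32 bytes of that 64-byte digest, and it is what the new objecttype=3 query matches on. A self-contained sketch follows (Python 2.7); the version, stream and RIPE values are made up, and single-byte packing stands in for encodeVarint because both values are below 253.

    import hashlib
    from struct import pack

    address_version = 4
    stream_number = 1
    ripe = '\x11' * 20                      # made-up 20-byte RIPE hash

    double_hash = hashlib.sha512(hashlib.sha512(
        pack('>B', address_version) + pack('>B', stream_number) + ripe).digest()).digest()
    tag = double_hash[32:]                  # last 32 of the 64 digest bytes
    assert len(tag) == 32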
@@ -2308,6 +2319,15 @@ class MyForm(QtGui.QMainWindow):
                 self.settingsDialogInstance.ui.lineEditSocksPassword.text()))
             shared.config.set('bitmessagesettings', 'sockslisten', str(
                 self.settingsDialogInstance.ui.checkBoxSocksListen.isChecked()))
+            try:
+                # Rounding to integers just for aesthetics
+                shared.config.set('bitmessagesettings', 'maxdownloadrate', str(
+                    int(float(self.settingsDialogInstance.ui.lineEditMaxDownloadRate.text()))))
+                shared.config.set('bitmessagesettings', 'maxuploadrate', str(
+                    int(float(self.settingsDialogInstance.ui.lineEditMaxUploadRate.text()))))
+            except:
+                QMessageBox.about(self, _translate("MainWindow", "Number needed"), _translate(
+                    "MainWindow", "Your maximum download and upload rate must be numbers. Ignoring what you typed."))
 
             shared.config.set('bitmessagesettings', 'namecoinrpctype',
                 self.settingsDialogInstance.getNamecoinType())
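The hunk above only writes the new 'maxdownloadrate' and 'maxuploadrate' options; how they are consumed is outside this diff. Below is a hedged sketch of a reader for them, where the helper name and the missing-option fallback are assumptions; only the option names and the "0 means unlimited" convention from the UI labels come from the commit.

    from ConfigParser import NoOptionError, SafeConfigParser

    def read_rate_limit(config, option):
        try:
            rate = config.getint('bitmessagesettings', option)
        except (NoOptionError, ValueError):
            rate = 0                          # assumed default: no limit configured
        return None if rate == 0 else rate    # None = unlimited, otherwise kB/s

    config = SafeConfigParser()
    config.add_section('bitmessagesettings')
    config.set('bitmessagesettings', 'maxdownloadrate', '500')
    print read_rate_limit(config, 'maxdownloadrate')   # 500 (kB/s)
    print read_rate_limit(config, 'maxuploadrate')     # None, i.e. unlimited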
@@ -2319,19 +2339,39 @@ class MyForm(QtGui.QMainWindow):
                 self.settingsDialogInstance.ui.lineEditNamecoinUser.text()))
             shared.config.set('bitmessagesettings', 'namecoinrpcpassword', str(
                 self.settingsDialogInstance.ui.lineEditNamecoinPassword.text()))
 
+            # Demanded difficulty tab
             if float(self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) >= 1:
                 shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(int(float(
                     self.settingsDialogInstance.ui.lineEditTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
             if float(self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) >= 1:
                 shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(int(float(
                     self.settingsDialogInstance.ui.lineEditSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
 
+            acceptableDifficultyChanged = False
+
             if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) == 0:
-                shared.config.set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', str(int(float(
-                    self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
+                if shared.config.get('bitmessagesettings','maxacceptablenoncetrialsperbyte') != str(int(float(
+                    self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)):
+                    # the user changed the max acceptable total difficulty
+                    acceptableDifficultyChanged = True
+                    shared.config.set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', str(int(float(
+                        self.settingsDialogInstance.ui.lineEditMaxAcceptableTotalDifficulty.text()) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
             if float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) >= 1 or float(self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) == 0:
-                shared.config.set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', str(int(float(
-                    self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+                if shared.config.get('bitmessagesettings','maxacceptablepayloadlengthextrabytes') != str(int(float(
+                    self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)):
+                    # the user changed the max acceptable small message difficulty
+                    acceptableDifficultyChanged = True
+                    shared.config.set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', str(int(float(
+                        self.settingsDialogInstance.ui.lineEditMaxAcceptableSmallMessageDifficulty.text()) * shared.networkDefaultPayloadLengthExtraBytes)))
+            if acceptableDifficultyChanged:
+                # It might now be possible to send msgs which were previously marked as toodifficult.
+                # Let us change them to 'msgqueued'. The singleWorker will try to send them and will again
+                # mark them as toodifficult if the receiver's required difficulty is still higher than
+                # we are willing to do.
+                sqlExecute('''UPDATE sent SET status='msgqueued' WHERE status='toodifficult' ''')
+                shared.workerQueue.put(('sendmessage', ''))
 
             #start:UI setting to stop trying to send messages after X days/months
             # I'm open to changing this UI to something else if someone has a better idea.
             if ((self.settingsDialogInstance.ui.lineEditDays.text()=='') and (self.settingsDialogInstance.ui.lineEditMonths.text()=='')):#We need to handle this special case. Bitmessage has its default behavior. The input is blank/blank
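As the hunks above show, the difficulty fields in the settings dialog are multipliers: on save the value is multiplied by the network default (networkDefaultProofOfWorkNonceTrialsPerByte or networkDefaultPayloadLengthExtraBytes) and stored as an integer string, and the dialog divides by the same default when displaying it; the new acceptableDifficultyChanged flag then requeues messages previously marked 'toodifficult' whenever either maximum-acceptable value actually changed. A round-trip sketch follows; the network default is a parameter because its concrete value is not stated in this diff.

    def difficulty_to_stored(multiplier, network_default):
        return str(int(float(multiplier) * network_default))    # what config.set stores

    def stored_to_difficulty(stored, network_default):
        return float(int(stored)) / network_default             # what the dialog displays

    stored = difficulty_to_stored('2', 1000)     # 1000 is a hypothetical default
    assert stored == '2000'
    assert stored_to_difficulty(stored, 1000) == 2.0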
@@ -3429,7 +3469,12 @@ class settingsDialog(QtGui.QDialog):
             shared.config.get('bitmessagesettings', 'sockspassword')))
         QtCore.QObject.connect(self.ui.comboBoxProxyType, QtCore.SIGNAL(
             "currentIndexChanged(int)"), self.comboBoxProxyTypeChanged)
+        self.ui.lineEditMaxDownloadRate.setText(str(
+            shared.config.get('bitmessagesettings', 'maxdownloadrate')))
+        self.ui.lineEditMaxUploadRate.setText(str(
+            shared.config.get('bitmessagesettings', 'maxuploadrate')))
 
+        # Demanded difficulty tab
         self.ui.lineEditTotalDifficulty.setText(str((float(shared.config.getint(
             'bitmessagesettings', 'defaultnoncetrialsperbyte')) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)))
         self.ui.lineEditSmallMessageDifficulty.setText(str((float(shared.config.getint(
@@ -3620,6 +3665,9 @@ class AddAddressDialog(QtGui.QDialog):
         elif status == 'ripetoolong':
             self.ui.labelAddressCheck.setText(_translate(
                 "MainWindow", "Some data encoded in the address is too long."))
+        elif status == 'varintmalformed':
+            self.ui.labelAddressCheck.setText(_translate(
+                "MainWindow", "Some data encoded in the address is malformed."))
         elif status == 'success':
             self.ui.labelAddressCheck.setText(
                 _translate("MainWindow", "Address is valid."))

@@ -3658,6 +3706,9 @@ class NewSubscriptionDialog(QtGui.QDialog):
         elif status == 'ripetoolong':
             self.ui.labelAddressCheck.setText(_translate(
                 "MainWindow", "Some data encoded in the address is too long."))
+        elif status == 'varintmalformed':
+            self.ui.labelAddressCheck.setText(_translate(
+                "MainWindow", "Some data encoded in the address is malformed."))
         elif status == 'success':
             self.ui.labelAddressCheck.setText(
                 _translate("MainWindow", "Address is valid."))

@@ -3670,7 +3721,7 @@ class NewSubscriptionDialog(QtGui.QDialog):
                 addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()
             tag = doubleHashOfAddressData[32:]
             queryreturn = sqlQuery(
-                '''select hash from inventory where objecttype='broadcast' and tag=?''', tag)
+                '''select hash from inventory where objecttype=3 and tag=?''', tag)
             if len(queryreturn) == 0:
                 self.ui.checkBoxDisplayMessagesAlreadyInInventory.setText(
                     _translate("MainWindow", "There are no recent broadcasts from this address to display."))
@@ -2,7 +2,7 @@
 
 # Form implementation generated from reading ui file 'settings.ui'
 #
-# Created: Mon May 19 15:54:27 2014
+# Created: Tue Sep 09 15:13:28 2014
 #      by: PyQt4 UI code generator 4.10.3
 #
 # WARNING! All changes made in this file will be lost!

@@ -128,6 +128,37 @@ class Ui_settingsDialog(object):
         self.lineEditTCPPort.setObjectName(_fromUtf8("lineEditTCPPort"))
         self.gridLayout_3.addWidget(self.lineEditTCPPort, 0, 2, 1, 1)
         self.gridLayout_4.addWidget(self.groupBox1, 0, 0, 1, 1)
+        self.groupBox_3 = QtGui.QGroupBox(self.tabNetworkSettings)
+        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
+        self.gridLayout_9 = QtGui.QGridLayout(self.groupBox_3)
+        self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
+        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_9.addItem(spacerItem1, 0, 0, 2, 1)
+        self.label_24 = QtGui.QLabel(self.groupBox_3)
+        self.label_24.setObjectName(_fromUtf8("label_24"))
+        self.gridLayout_9.addWidget(self.label_24, 0, 1, 1, 1)
+        self.lineEditMaxDownloadRate = QtGui.QLineEdit(self.groupBox_3)
+        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
+        sizePolicy.setHorizontalStretch(0)
+        sizePolicy.setVerticalStretch(0)
+        sizePolicy.setHeightForWidth(self.lineEditMaxDownloadRate.sizePolicy().hasHeightForWidth())
+        self.lineEditMaxDownloadRate.setSizePolicy(sizePolicy)
+        self.lineEditMaxDownloadRate.setMaximumSize(QtCore.QSize(60, 16777215))
+        self.lineEditMaxDownloadRate.setObjectName(_fromUtf8("lineEditMaxDownloadRate"))
+        self.gridLayout_9.addWidget(self.lineEditMaxDownloadRate, 0, 2, 1, 1)
+        self.label_25 = QtGui.QLabel(self.groupBox_3)
+        self.label_25.setObjectName(_fromUtf8("label_25"))
+        self.gridLayout_9.addWidget(self.label_25, 1, 1, 1, 1)
+        self.lineEditMaxUploadRate = QtGui.QLineEdit(self.groupBox_3)
+        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
+        sizePolicy.setHorizontalStretch(0)
+        sizePolicy.setVerticalStretch(0)
+        sizePolicy.setHeightForWidth(self.lineEditMaxUploadRate.sizePolicy().hasHeightForWidth())
+        self.lineEditMaxUploadRate.setSizePolicy(sizePolicy)
+        self.lineEditMaxUploadRate.setMaximumSize(QtCore.QSize(60, 16777215))
+        self.lineEditMaxUploadRate.setObjectName(_fromUtf8("lineEditMaxUploadRate"))
+        self.gridLayout_9.addWidget(self.lineEditMaxUploadRate, 1, 2, 1, 1)
+        self.gridLayout_4.addWidget(self.groupBox_3, 2, 0, 1, 1)
         self.groupBox_2 = QtGui.QGroupBox(self.tabNetworkSettings)
         self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
         self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
@@ -176,8 +207,8 @@ class Ui_settingsDialog(object):
         self.comboBoxProxyType.addItem(_fromUtf8(""))
         self.gridLayout_2.addWidget(self.comboBoxProxyType, 0, 1, 1, 1)
         self.gridLayout_4.addWidget(self.groupBox_2, 1, 0, 1, 1)
-        spacerItem1 = QtGui.QSpacerItem(20, 70, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
-        self.gridLayout_4.addItem(spacerItem1, 2, 0, 1, 1)
+        spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
+        self.gridLayout_4.addItem(spacerItem2, 3, 0, 1, 1)
         self.tabWidgetSettings.addTab(self.tabNetworkSettings, _fromUtf8(""))
         self.tabDemandedDifficulty = QtGui.QWidget()
         self.tabDemandedDifficulty.setObjectName(_fromUtf8("tabDemandedDifficulty"))

@@ -199,8 +230,8 @@ class Ui_settingsDialog(object):
         self.label_8.setWordWrap(True)
         self.label_8.setObjectName(_fromUtf8("label_8"))
         self.gridLayout_6.addWidget(self.label_8, 0, 0, 1, 3)
-        spacerItem2 = QtGui.QSpacerItem(203, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_6.addItem(spacerItem2, 1, 0, 1, 1)
+        spacerItem3 = QtGui.QSpacerItem(203, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_6.addItem(spacerItem3, 1, 0, 1, 1)
         self.label_12 = QtGui.QLabel(self.tabDemandedDifficulty)
         self.label_12.setWordWrap(True)
         self.label_12.setObjectName(_fromUtf8("label_12"))

@@ -223,10 +254,10 @@ class Ui_settingsDialog(object):
         self.lineEditTotalDifficulty.setMaximumSize(QtCore.QSize(70, 16777215))
         self.lineEditTotalDifficulty.setObjectName(_fromUtf8("lineEditTotalDifficulty"))
         self.gridLayout_6.addWidget(self.lineEditTotalDifficulty, 1, 2, 1, 1)
-        spacerItem3 = QtGui.QSpacerItem(203, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_6.addItem(spacerItem3, 3, 0, 1, 1)
-        spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
-        self.gridLayout_6.addItem(spacerItem4, 5, 0, 1, 1)
+        spacerItem4 = QtGui.QSpacerItem(203, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_6.addItem(spacerItem4, 3, 0, 1, 1)
+        spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
+        self.gridLayout_6.addItem(spacerItem5, 5, 0, 1, 1)
         self.tabWidgetSettings.addTab(self.tabDemandedDifficulty, _fromUtf8(""))
         self.tabMaxAcceptableDifficulty = QtGui.QWidget()
         self.tabMaxAcceptableDifficulty.setObjectName(_fromUtf8("tabMaxAcceptableDifficulty"))

@@ -236,8 +267,8 @@ class Ui_settingsDialog(object):
         self.label_15.setWordWrap(True)
         self.label_15.setObjectName(_fromUtf8("label_15"))
         self.gridLayout_7.addWidget(self.label_15, 0, 0, 1, 3)
-        spacerItem5 = QtGui.QSpacerItem(102, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_7.addItem(spacerItem5, 1, 0, 1, 1)
+        spacerItem6 = QtGui.QSpacerItem(102, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_7.addItem(spacerItem6, 1, 0, 1, 1)
         self.label_13 = QtGui.QLabel(self.tabMaxAcceptableDifficulty)
         self.label_13.setLayoutDirection(QtCore.Qt.LeftToRight)
         self.label_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)

@@ -252,8 +283,8 @@ class Ui_settingsDialog(object):
         self.lineEditMaxAcceptableTotalDifficulty.setMaximumSize(QtCore.QSize(70, 16777215))
         self.lineEditMaxAcceptableTotalDifficulty.setObjectName(_fromUtf8("lineEditMaxAcceptableTotalDifficulty"))
         self.gridLayout_7.addWidget(self.lineEditMaxAcceptableTotalDifficulty, 1, 2, 1, 1)
-        spacerItem6 = QtGui.QSpacerItem(102, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_7.addItem(spacerItem6, 2, 0, 1, 1)
+        spacerItem7 = QtGui.QSpacerItem(102, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_7.addItem(spacerItem7, 2, 0, 1, 1)
         self.label_14 = QtGui.QLabel(self.tabMaxAcceptableDifficulty)
         self.label_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
         self.label_14.setObjectName(_fromUtf8("label_14"))

@@ -267,15 +298,15 @@ class Ui_settingsDialog(object):
         self.lineEditMaxAcceptableSmallMessageDifficulty.setMaximumSize(QtCore.QSize(70, 16777215))
         self.lineEditMaxAcceptableSmallMessageDifficulty.setObjectName(_fromUtf8("lineEditMaxAcceptableSmallMessageDifficulty"))
         self.gridLayout_7.addWidget(self.lineEditMaxAcceptableSmallMessageDifficulty, 2, 2, 1, 1)
-        spacerItem7 = QtGui.QSpacerItem(20, 147, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
-        self.gridLayout_7.addItem(spacerItem7, 3, 1, 1, 1)
+        spacerItem8 = QtGui.QSpacerItem(20, 147, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
+        self.gridLayout_7.addItem(spacerItem8, 3, 1, 1, 1)
         self.tabWidgetSettings.addTab(self.tabMaxAcceptableDifficulty, _fromUtf8(""))
         self.tabNamecoin = QtGui.QWidget()
         self.tabNamecoin.setObjectName(_fromUtf8("tabNamecoin"))
         self.gridLayout_8 = QtGui.QGridLayout(self.tabNamecoin)
         self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
-        spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_8.addItem(spacerItem8, 2, 0, 1, 1)
+        spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_8.addItem(spacerItem9, 2, 0, 1, 1)
         self.label_16 = QtGui.QLabel(self.tabNamecoin)
         self.label_16.setWordWrap(True)
         self.label_16.setObjectName(_fromUtf8("label_16"))

@@ -287,10 +318,10 @@ class Ui_settingsDialog(object):
         self.lineEditNamecoinHost = QtGui.QLineEdit(self.tabNamecoin)
         self.lineEditNamecoinHost.setObjectName(_fromUtf8("lineEditNamecoinHost"))
         self.gridLayout_8.addWidget(self.lineEditNamecoinHost, 2, 2, 1, 1)
-        spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_8.addItem(spacerItem9, 3, 0, 1, 1)
         spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_8.addItem(spacerItem10, 4, 0, 1, 1)
+        self.gridLayout_8.addItem(spacerItem10, 3, 0, 1, 1)
+        spacerItem11 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_8.addItem(spacerItem11, 4, 0, 1, 1)
         self.label_18 = QtGui.QLabel(self.tabNamecoin)
         self.label_18.setEnabled(True)
         self.label_18.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)

@@ -299,8 +330,8 @@ class Ui_settingsDialog(object):
         self.lineEditNamecoinPort = QtGui.QLineEdit(self.tabNamecoin)
         self.lineEditNamecoinPort.setObjectName(_fromUtf8("lineEditNamecoinPort"))
         self.gridLayout_8.addWidget(self.lineEditNamecoinPort, 3, 2, 1, 1)
-        spacerItem11 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
-        self.gridLayout_8.addItem(spacerItem11, 8, 1, 1, 1)
+        spacerItem12 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
+        self.gridLayout_8.addItem(spacerItem12, 8, 1, 1, 1)
         self.labelNamecoinUser = QtGui.QLabel(self.tabNamecoin)
         self.labelNamecoinUser.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
         self.labelNamecoinUser.setObjectName(_fromUtf8("labelNamecoinUser"))

@@ -308,8 +339,8 @@ class Ui_settingsDialog(object):
         self.lineEditNamecoinUser = QtGui.QLineEdit(self.tabNamecoin)
         self.lineEditNamecoinUser.setObjectName(_fromUtf8("lineEditNamecoinUser"))
         self.gridLayout_8.addWidget(self.lineEditNamecoinUser, 4, 2, 1, 1)
-        spacerItem12 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_8.addItem(spacerItem12, 5, 0, 1, 1)
+        spacerItem13 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_8.addItem(spacerItem13, 5, 0, 1, 1)
         self.labelNamecoinPassword = QtGui.QLabel(self.tabNamecoin)
         self.labelNamecoinPassword.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
         self.labelNamecoinPassword.setObjectName(_fromUtf8("labelNamecoinPassword"))

@@ -347,8 +378,8 @@ class Ui_settingsDialog(object):
         self.label_7.setWordWrap(True)
         self.label_7.setObjectName(_fromUtf8("label_7"))
         self.gridLayout_5.addWidget(self.label_7, 0, 0, 1, 3)
-        spacerItem13 = QtGui.QSpacerItem(212, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
-        self.gridLayout_5.addItem(spacerItem13, 1, 0, 1, 1)
+        spacerItem14 = QtGui.QSpacerItem(212, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
+        self.gridLayout_5.addItem(spacerItem14, 1, 0, 1, 1)
         self.widget = QtGui.QWidget(self.tabResendsExpire)
         self.widget.setMinimumSize(QtCore.QSize(231, 75))
         self.widget.setObjectName(_fromUtf8("widget"))

@@ -373,8 +404,8 @@ class Ui_settingsDialog(object):
         self.label_23.setGeometry(QtCore.QRect(170, 41, 71, 16))
         self.label_23.setObjectName(_fromUtf8("label_23"))
         self.gridLayout_5.addWidget(self.widget, 1, 2, 1, 1)
-        spacerItem14 = QtGui.QSpacerItem(20, 129, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
-        self.gridLayout_5.addItem(spacerItem14, 2, 1, 1, 1)
+        spacerItem15 = QtGui.QSpacerItem(20, 129, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
+        self.gridLayout_5.addItem(spacerItem15, 2, 1, 1, 1)
         self.tabWidgetSettings.addTab(self.tabResendsExpire, _fromUtf8(""))
         self.gridLayout.addWidget(self.tabWidgetSettings, 0, 0, 1, 1)
 

@@ -416,6 +447,9 @@ class Ui_settingsDialog(object):
         self.tabWidgetSettings.setTabText(self.tabWidgetSettings.indexOf(self.tabUserInterface), _translate("settingsDialog", "User Interface", None))
         self.groupBox1.setTitle(_translate("settingsDialog", "Listening port", None))
         self.label.setText(_translate("settingsDialog", "Listen for connections on port:", None))
+        self.groupBox_3.setTitle(_translate("settingsDialog", "Bandwidth limit", None))
+        self.label_24.setText(_translate("settingsDialog", "Maximum download rate (kB/s): [0: unlimited]", None))
+        self.label_25.setText(_translate("settingsDialog", "Maximum upload rate (kB/s): [0: unlimited]", None))
         self.groupBox_2.setTitle(_translate("settingsDialog", "Proxy server / Tor", None))
         self.label_2.setText(_translate("settingsDialog", "Type:", None))
         self.label_3.setText(_translate("settingsDialog", "Server hostname:", None))
@@ -247,6 +247,74 @@
     </layout>
    </widget>
   </item>
+  <item row="2" column="0">
+   <widget class="QGroupBox" name="groupBox_3">
+    <property name="title">
+     <string>Bandwidth limit</string>
+    </property>
+    <layout class="QGridLayout" name="gridLayout_9">
+     <item row="0" column="0" rowspan="2">
+      <spacer name="horizontalSpacer_11">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item row="0" column="1">
+      <widget class="QLabel" name="label_24">
+       <property name="text">
+        <string>Maximum download rate (kB/s): [0: unlimited]</string>
+       </property>
+      </widget>
+     </item>
+     <item row="0" column="2">
+      <widget class="QLineEdit" name="lineEditMaxDownloadRate">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>60</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+     <item row="1" column="1">
+      <widget class="QLabel" name="label_25">
+       <property name="text">
+        <string>Maximum upload rate (kB/s): [0: unlimited]</string>
+       </property>
+      </widget>
+     </item>
+     <item row="1" column="2">
+      <widget class="QLineEdit" name="lineEditMaxUploadRate">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>60</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </widget>
+  </item>
   <item row="1" column="0">
    <widget class="QGroupBox" name="groupBox_2">
    <property name="title">

@@ -350,7 +418,7 @@
     </layout>
    </widget>
   </item>
-  <item row="2" column="0">
+  <item row="3" column="0">
   <spacer name="verticalSpacer">
    <property name="orientation">
     <enum>Qt::Vertical</enum>

@@ -358,7 +426,7 @@
    <property name="sizeHint" stdset="0">
     <size>
      <width>20</width>
-     <height>70</height>
+     <height>40</height>
     </size>
    </property>
   </spacer>
@@ -1,19 +1,19 @@
 from setuptools import setup

 name = "Bitmessage"
-version = "0.4.3"
+version = "0.4.4"
 mainscript = ["bitmessagemain.py"]

 setup(
 name = name,
 version = version,
 app = mainscript,
 setup_requires = ["py2app"],
 options = dict(
 py2app = dict(
 resources = ["images", "translations"],
 includes = ['sip', 'PyQt4._qt'],
 iconfile = "images/bitmessage.icns"
 )
 )
 )
@@ -7,8 +7,9 @@ from struct import unpack, pack
 import sys
 import string
 from subprocess import call # used when the API must execute an outside program
-from pyelliptic.openssl import OpenSSL
+import traceback

+from pyelliptic.openssl import OpenSSL
 import highlevelcrypto
 from addresses import *
 import helper_generic
@@ -51,18 +52,23 @@ class objectProcessor(threading.Thread):
 while True:
 objectType, data = shared.objectProcessorQueue.get()

-if objectType == 'getpubkey':
-self.processgetpubkey(data)
-elif objectType == 'pubkey':
-self.processpubkey(data)
-elif objectType == 'msg':
-self.processmsg(data)
-elif objectType == 'broadcast':
-self.processbroadcast(data)
-elif objectType == 'checkShutdownVariable': # is more of a command, not an object type. Is used to get this thread past the queue.get() so that it will check the shutdown variable.
-pass
-else:
-logger.critical('Error! Bug! The class_objectProcessor was passed an object type it doesn\'t recognize: %s' % str(objectType))
+try:
+if objectType == 0: # getpubkey
+self.processgetpubkey(data)
+elif objectType == 1: #pubkey
+self.processpubkey(data)
+elif objectType == 2: #msg
+self.processmsg(data)
+elif objectType == 3: #broadcast
+self.processbroadcast(data)
+elif objectType == 'checkShutdownVariable': # is more of a command, not an object type. Is used to get this thread past the queue.get() so that it will check the shutdown variable.
+pass
+else:
+logger.critical('Error! Bug! The class_objectProcessor was passed an object type it doesn\'t recognize: %s' % str(objectType))
+except varintDecodeError as e:
+logger.debug("There was a problem with a varint while processing an object. Some details: %s" % e)
+except Exception as e:
+logger.critical("Critical error within objectProcessorThread: \n%s" % traceback.format_exc())

 with shared.objectProcessorQueueSizeLock:
 shared.objectProcessorQueueSize -= len(data) # We maintain objectProcessorQueueSize so that we will slow down requesting objects if too much data accumulates in the queue.
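The hunk above switches the dispatcher to the numeric object types carried in protocol v3 objects and wraps the whole chain in a single try/except so one malformed object cannot kill the processing thread. A minimal, self-contained sketch of that containment pattern follows; the handler table and function name are illustrative, not part of the commit.

import logging
import traceback

class varintDecodeError(Exception):
    """Raised by the project's decodeVarint when an encoding is malformed."""
    pass

def process_object(objectType, data, handlers, logger=logging.getLogger(__name__)):
    # handlers would map 0/1/2/3 to processgetpubkey/processpubkey/processmsg/processbroadcast.
    try:
        handler = handlers.get(objectType)
        if handler is None:
            logger.critical('unrecognized object type: %r' % (objectType,))
            return
        handler(data)
    except varintDecodeError as e:
        # Malformed varints are expected from buggy or hostile peers; log and drop the object.
        logger.debug('problem with a varint while processing an object: %s' % e)
    except Exception:
        logger.critical('critical error while processing an object:\n%s' % traceback.format_exc())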
@@ -83,17 +89,7 @@ class objectProcessor(threading.Thread):
 break

 def processgetpubkey(self, data):
-readPosition = 8 # bypass the nonce
-embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-# This section is used for the transition from 32 bit time to 64 bit
-# time in the protocol.
-if embeddedTime == 0:
-embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-readPosition += 8
-else:
-readPosition += 4
-
+readPosition = 20 # bypass the nonce, time, and object type
 requestedAddressVersionNumber, addressVersionLength = decodeVarint(
 data[readPosition:readPosition + 10])
 readPosition += addressVersionLength
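This hunk, and the matching ones in processpubkey, processmsg and processbroadcast below, drop the 32/64-bit time transition code in favour of a fixed 20-byte skip: a protocol v3 object begins with an 8-byte nonce, an 8-byte expiresTime and a 4-byte object type. A hedged sketch of that header split (field sizes taken from the hunks; the function name is invented for illustration):

from struct import unpack

def split_v3_object_header(data):
    """Return (nonce, expiresTime, objectType, readPosition) for an object payload."""
    if len(data) < 20:
        raise ValueError('object payload too short to hold a v3 header')
    nonce = data[0:8]                          # proof-of-work nonce
    expiresTime, = unpack('>Q', data[8:16])    # 64-bit expiry time
    objectType, = unpack('>I', data[16:20])    # 0 getpubkey, 1 pubkey, 2 msg, 3 broadcast
    return nonce, expiresTime, objectType, 20  # varint parsing resumes at offset 20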
@@ -147,7 +143,7 @@ class objectProcessor(threading.Thread):
 myAddress, 'lastpubkeysendtime'))
 except:
 lastPubkeySendTime = 0
-if lastPubkeySendTime > time.time() - shared.lengthOfTimeToHoldOnToAllPubkeys: # If the last time we sent our pubkey was more recent than 28 days ago...
+if lastPubkeySendTime > time.time() - 2419200: # If the last time we sent our pubkey was more recent than 28 days ago...
 logger.info('Found getpubkey-requested-item in my list of EC hashes BUT we already sent it recently. Ignoring request. The lastPubkeySendTime is: %s' % lastPubkeySendTime)
 return
 logger.info('Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out.')

@@ -166,17 +162,8 @@ class objectProcessor(threading.Thread):
 shared.numberOfPubkeysProcessed += 1
 shared.UISignalQueue.put((
 'updateNumberOfPubkeysProcessed', 'no data'))
-readPosition = 8 # bypass the nonce
-embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-# This section is used for the transition from 32 bit time to 64 bit
-# time in the protocol.
-if embeddedTime == 0:
-embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-readPosition += 8
-else:
-readPosition += 4
-
+embeddedTime, = unpack('>Q', data[8:16])
+readPosition = 20 # bypass the nonce, time, and object type
 addressVersion, varintLength = decodeVarint(
 data[readPosition:readPosition + 10])
 readPosition += varintLength
@@ -225,15 +212,25 @@ class objectProcessor(threading.Thread):

 queryreturn = sqlQuery(
 '''SELECT usedpersonally FROM pubkeys WHERE hash=? AND addressversion=? AND usedpersonally='yes' ''', ripe, addressVersion)

+"""
+With the changes in protocol v3, we have to be careful to store pubkey data
+in the database the same way we did before to maintain backwards compatibility
+with what is in people's databases already. This means that for v2 keys, we
+must store the nonce, the time, and then everything else starting with the
+address version.
+"""
+dataToStore = '\x00' * 8 # fake nonce
+dataToStore += data[8:16] # the time
+dataToStore += data[20:] # everything else
+
 if queryreturn != []: # if this pubkey is already in our database and if we have used it personally:
 logger.info('We HAVE used this pubkey personally. Updating time.')
-t = (ripe, addressVersion, data, embeddedTime, 'yes')
+t = (ripe, addressVersion, dataToStore, int(time.time()), 'yes')
 else:
 logger.info('We have NOT used this pubkey personally. Inserting in database.')
-t = (ripe, addressVersion, data, embeddedTime, 'no')
-# This will also update the embeddedTime.
+t = (ripe, addressVersion, dataToStore, int(time.time()), 'no')
 sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
-# shared.workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
 self.possibleNewPubkey(ripe = ripe)
 if addressVersion == 3:
 if len(data) < 170: # sanity check.
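The dataToStore splice above keeps the pubkeys table backward compatible: stored rows continue to look like nonce + time + pubkey fields even though v3 payloads carry an 8-byte expiresTime and a 4-byte object type after the nonce. The same splice as a standalone sketch (the helper name is not the project's):

def legacy_pubkey_row(data):
    # Rebuild the pre-v3 on-disk layout from a v3 object payload.
    dataToStore = b'\x00' * 8   # fake nonce; the row is only stored, never relayed
    dataToStore += data[8:16]   # the embedded time
    dataToStore += data[20:]    # address version onward
    return dataToStore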
@@ -242,9 +239,6 @@ class objectProcessor(threading.Thread):
 bitfieldBehaviors = data[readPosition:readPosition + 4]
 readPosition += 4
 publicSigningKey = '\x04' + data[readPosition:readPosition + 64]
-# Is it possible for a public key to be invalid such that trying to
-# encrypt or sign with it will cause an error? If it is, we should
-# probably test these keys here.
 readPosition += 64
 publicEncryptionKey = '\x04' + data[readPosition:readPosition + 64]
 readPosition += 64

@@ -259,14 +253,32 @@ class objectProcessor(threading.Thread):
 data[readPosition:readPosition + 10])
 readPosition += signatureLengthLength
 signature = data[readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(data[8:endOfSignedDataPosition], signature, publicSigningKey.encode('hex')):
-logger.warning('ECDSA verify failed (within processpubkey)')
+"""
+With the changes in protocol v3, to maintain backwards compatibility, signatures will be sent
+the 'old' way during an upgrade period and then a 'new' simpler way after that. We will therefore
+check the sig both ways.
+Old way:
+signedData = timePubkeyWasSigned(4 bytes) + addressVersion through extra_bytes
+New way:
+signedData = all of the payload data, from the time down through the extra_bytes
+
+The timePubkeyWasSigned will be calculated by subtracting 28 days form the embedded expiresTime.
+"""
+expiresTime, = unpack('>Q', data[8:16])
+TTL = 28 * 24 * 60 * 60
+signedData = pack('>I', (expiresTime - TTL)) # the time that the pubkey was signed. 4 bytes.
+signedData += data[20:endOfSignedDataPosition] # the address version down through the payloadLengthExtraBytes
+
+if highlevelcrypto.verify(signedData, signature, publicSigningKey.encode('hex')):
+logger.info('ECDSA verify passed (within processpubkey, old method)')
+else:
+logger.warning('ECDSA verify failed (within processpubkey, old method)')
+# let us try the newer signature method
+if highlevelcrypto.verify(data[8:endOfSignedDataPosition], signature, publicSigningKey.encode('hex')):
+logger.info('ECDSA verify passed (within processpubkey, new method)')
+else:
+logger.warning('ECDSA verify failed (within processpubkey, new method)')
 return
-logger.info('ECDSA verify passed (within processpubkey)')
-except Exception as err:
-logger.warning('ECDSA verify failed (within processpubkey) %s' % err)
-return

 sha = hashlib.new('sha512')
 sha.update(publicSigningKey + publicEncryptionKey)
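During the upgrade period the same pubkey signature is checked against two byte layouts. A sketch of both, mirroring the hunk above; highlevelcrypto.verify is the project's helper, while the two function names here are illustrative:

from struct import pack, unpack

def pubkey_signed_data_old(data, endOfSignedDataPosition, ttl=28 * 24 * 60 * 60):
    # Old layout: 4-byte "time the pubkey was signed" (expiresTime minus the
    # 28-day TTL) followed by addressVersion through extra_bytes.
    expiresTime, = unpack('>Q', data[8:16])
    return pack('>I', expiresTime - ttl) + data[20:endOfSignedDataPosition]

def pubkey_signed_data_new(data, endOfSignedDataPosition):
    # New layout: everything from the time field down through extra_bytes.
    return data[8:endOfSignedDataPosition]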
@@ -286,109 +298,45 @@ class objectProcessor(threading.Thread):
 )
 )

+"""
+With the changes in protocol v3, we have to be careful to store pubkey data
+in the database the same way we did before to maintain backwards compatibility
+with what is in people's databases already. This means that for v3 keys, we
+must store the nonce, the time, and then everything else starting with the
+address version.
+"""
+dataToStore = '\x00' * 8 # fake nonce
+dataToStore += data[8:16] # the time
+dataToStore += data[20:] # everything else
+
 queryreturn = sqlQuery('''SELECT usedpersonally FROM pubkeys WHERE hash=? AND addressversion=? AND usedpersonally='yes' ''', ripe, addressVersion)
 if queryreturn != []: # if this pubkey is already in our database and if we have used it personally:
 logger.info('We HAVE used this pubkey personally. Updating time.')
-t = (ripe, addressVersion, data, embeddedTime, 'yes')
+t = (ripe, addressVersion, dataToStore, int(time.time()), 'yes')
 else:
 logger.info('We have NOT used this pubkey personally. Inserting in database.')
-t = (ripe, addressVersion, data, embeddedTime, 'no')
-# This will also update the embeddedTime.
+t = (ripe, addressVersion, dataToStore, int(time.time()), 'no')
 sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
 self.possibleNewPubkey(ripe = ripe)

 if addressVersion == 4:
-"""
-There exist a function: shared.decryptAndCheckPubkeyPayload which does something almost
-the same as this section of code. There are differences, however; one being that
-decryptAndCheckPubkeyPayload requires that a cryptor object be created each time it is
-run which is an expensive operation. This, on the other hand, keeps them saved in
-the shared.neededPubkeys dictionary so that if an attacker sends us many
-incorrectly-tagged pubkeys, which would force us to try to decrypt them, this code
-would run and handle that event quite quickly.
-"""
 if len(data) < 350: # sanity check.
 logger.debug('(within processpubkey) payloadLength less than 350. Sanity check failed.')
 return
-signedData = data[8:readPosition] # Some of the signed data is not encrypted so let's keep it for now.
 tag = data[readPosition:readPosition + 32]
-readPosition += 32
-encryptedData = data[readPosition:]
 if tag not in shared.neededPubkeys:
 logger.info('We don\'t need this v4 pubkey. We didn\'t ask for it.')
 return
-
-# Let us try to decrypt the pubkey
-cryptorObject = shared.neededPubkeys[tag]
-try:
-decryptedData = cryptorObject.decrypt(encryptedData)
-except:
-# Someone must have encrypted some data with a different key
-# but tagged it with a tag for which we are watching.
-logger.info('Pubkey decryption was unsuccessful.')
-return
-
-readPosition = 0
-bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
-readPosition += 4
-publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
-# Is it possible for a public key to be invalid such that trying to
-# encrypt or check a sig with it will cause an error? If it is, we
-# should probably test these keys here.
-readPosition += 64
-publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
-readPosition += 64
-specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
-decryptedData[readPosition:readPosition + 10])
-readPosition += specifiedNonceTrialsPerByteLength
-specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
-decryptedData[readPosition:readPosition + 10])
-readPosition += specifiedPayloadLengthExtraBytesLength
-signedData += decryptedData[:readPosition]
-signatureLength, signatureLengthLength = decodeVarint(
-decryptedData[readPosition:readPosition + 10])
-readPosition += signatureLengthLength
-signature = decryptedData[readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(signedData, signature, publicSigningKey.encode('hex')):
-logger.info('ECDSA verify failed (within processpubkey)')
-return
-logger.info('ECDSA verify passed (within processpubkey)')
-except Exception as err:
-logger.info('ECDSA verify failed (within processpubkey) %s' % err)
-return
-
-sha = hashlib.new('sha512')
-sha.update(publicSigningKey + publicEncryptionKey)
-ripeHasher = hashlib.new('ripemd160')
-ripeHasher.update(sha.digest())
-ripe = ripeHasher.digest()
-
-# We need to make sure that the tag on the outside of the encryption
-# is the one generated from hashing these particular keys.
-if tag != hashlib.sha512(hashlib.sha512(encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()[32:]:
-logger.info('Someone was trying to act malicious: tag doesn\'t match the keys in this pubkey message. Ignoring it.')
-return
-
-logger.info('within recpubkey, addressVersion: %s, streamNumber: %s \n\
-ripe %s\n\
-publicSigningKey in hex: %s\n\
-publicEncryptionKey in hex: %s' % (addressVersion,
-streamNumber,
-ripe.encode('hex'),
-publicSigningKey.encode('hex'),
-publicEncryptionKey.encode('hex')
-)
-)
-
-t = (ripe, addressVersion, signedData, embeddedTime, 'yes')
-sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
-
-fromAddress = encodeAddress(addressVersion, streamNumber, ripe)
-# That this point we know that we have been waiting on this pubkey.
-# This function will command the workerThread to start work on
-# the messages that require it.
-self.possibleNewPubkey(address = fromAddress)
+# Let us try to decrypt the pubkey
+toAddress, cryptorObject = shared.neededPubkeys[tag]
+if shared.decryptAndCheckPubkeyPayload(data, toAddress) == 'successful':
+# At this point we know that we have been waiting on this pubkey.
+# This function will command the workerThread to start work on
+# the messages that require it.
+self.possibleNewPubkey(address=toAddress)

 # Display timing data
 timeRequiredToProcessPubkey = time.time(
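The removed block trial-decrypted v4 pubkeys inline; that work now happens in shared.decryptAndCheckPubkeyPayload, but routing still hinges on the 32-byte tag the removed code recomputed from the keys. A sketch of that derivation, with the double-SHA-512 formula taken from the removed lines (encodeVarint is the project's encoder, passed in here to keep the sketch self-contained):

import hashlib

def derive_tag(addressVersion, streamNumber, ripe, encodeVarint):
    # Second half of SHA-512(SHA-512(addressVersion || streamNumber || ripe)).
    inner = hashlib.sha512(encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe).digest()
    return hashlib.sha512(inner).digest()[32:]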
@@ -401,28 +349,28 @@ class objectProcessor(threading.Thread):
 shared.numberOfMessagesProcessed += 1
 shared.UISignalQueue.put((
 'updateNumberOfMessagesProcessed', 'no data'))
-readPosition = 8 # bypass the nonce
-embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-# This section is used for the transition from 32 bit time to 64 bit
-# time in the protocol.
-if embeddedTime == 0:
-embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-readPosition += 8
-else:
-readPosition += 4
+readPosition = 20 # bypass the nonce, time, and object type
+"""
+In protocol v2, the next byte(s) was the streamNumber. But starting after
+the protocol v3 upgrade period, the next byte(s) will be a msg version
+number followed by the streamNumber.
+"""
+#msgVersionOutsideEncryption, msgVersionOutsideEncryptionLength = decodeVarint(data[readPosition:readPosition + 9])
+#readPosition += msgVersionOutsideEncryptionLength
+
 streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(
 data[readPosition:readPosition + 9])
 readPosition += streamNumberAsClaimedByMsgLength
 inventoryHash = calculateInventoryHash(data)
 initialDecryptionSuccessful = False
 # Let's check whether this is a message acknowledgement bound for us.
-if data[readPosition:] in shared.ackdataForWhichImWatching:
+if data[-32:] in shared.ackdataForWhichImWatching:
 logger.info('This msg IS an acknowledgement bound for me.')
-del shared.ackdataForWhichImWatching[data[readPosition:]]
+del shared.ackdataForWhichImWatching[data[-32:]]
 sqlExecute('UPDATE sent SET status=? WHERE ackdata=?',
-'ackreceived', data[readPosition:])
-shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (data[readPosition:], tr.translateText("MainWindow",'Acknowledgement of the message received. %1').arg(l10n.formatTimestamp()))))
+'ackreceived', data[-32:])
+shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (data[-32:], tr.translateText("MainWindow",'Acknowledgement of the message received. %1').arg(l10n.formatTimestamp()))))
 return
 else:
 logger.info('This was NOT an acknowledgement bound for me.')
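A msg acknowledgement is now recognised by the trailing 32 bytes of the object rather than by everything after a header offset. A one-line sketch of that membership check (watching stands in for shared.ackdataForWhichImWatching):

def is_ack_for_me(data, watching):
    # Per the hunk above: the ackdata is the trailing 32 bytes of the payload.
    return data[-32:] in watching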
@@ -430,17 +378,35 @@ class objectProcessor(threading.Thread):

 # This is not an acknowledgement bound for me. See if it is a message
 # bound for me by trying to decrypt it with my private keys.

+# This can be simplified quite a bit after 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
 for key, cryptorObject in shared.myECCryptorObjects.items():
 try:
-decryptedData = cryptorObject.decrypt(
-data[readPosition:])
+decryptedData = cryptorObject.decrypt(data[readPosition:])
 toRipe = key # This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
 initialDecryptionSuccessful = True
-logger.info('EC decryption successful using key associated with ripe hash: %s' % key.encode('hex'))
+logger.info('EC decryption successful using key associated with ripe hash: %s. msg did NOT specify version.' % key.encode('hex'))
+
+# We didn't bypass a msg version above as it is commented out.
+# But the decryption was successful. Which means that there
+# wasn't a msg version byte include in this msg.
+msgObjectContainedVersion = False
 break
 except Exception as err:
-pass
-# print 'cryptorObject.decrypt Exception:', err
+# What if a client sent us a msg with
+# a msg version included? We didn't bypass it above. So
+# let's try to decrypt the msg assuming that it is present.
+try:
+decryptedData = cryptorObject.decrypt(data[readPosition+1:]) # notice that we offset by 1 byte compared to the attempt above.
+toRipe = key # This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
+initialDecryptionSuccessful = True
+logger.info('EC decryption successful using key associated with ripe hash: %s. msg DID specifiy version.' % key.encode('hex'))
+
+# There IS a msg version byte include in this msg.
+msgObjectContainedVersion = True
+break
+except Exception as err:
+pass
 if not initialDecryptionSuccessful:
 # This is not a message bound for me.
 logger.info('Length of time program spent failing to decrypt this message: %s seconds.' % (time.time() - messageProcessingStartTime,))
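Because a msg may or may not carry a version varint outside the encryption during the upgrade period, decryption is attempted at two offsets. A compact sketch of that double attempt; cryptor objects are assumed to expose .decrypt() as the project's pyelliptic wrappers do, and the function itself is illustrative:

def try_decrypt_msg(data, readPosition, cryptorObjects):
    """Return (ripe, decryptedData, containedVersion) or (None, None, None)."""
    for ripe, cryptor in cryptorObjects.items():
        try:
            # First attempt: no msg version varint outside the encryption.
            return ripe, cryptor.decrypt(data[readPosition:]), False
        except Exception:
            pass
        try:
            # Second attempt: skip a one-byte msg version before the ciphertext.
            return ripe, cryptor.decrypt(data[readPosition + 1:]), True
        except Exception:
            pass
    return None, None, None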
@@ -450,12 +416,15 @@ class objectProcessor(threading.Thread):
 toAddress = shared.myAddressesByHash[
 toRipe] # Look up my address based on the RIPE hash.
 readPosition = 0
-messageVersion, messageVersionLength = decodeVarint(
-decryptedData[readPosition:readPosition + 10])
-readPosition += messageVersionLength
-if messageVersion != 1:
-logger.info('Cannot understand message versions other than one. Ignoring message.')
-return
+if not msgObjectContainedVersion: # by which I mean "if the msg object didn't have the msg version outside of the encryption". This confusingness will be removed after the protocol v3 upgrade period.
+messageVersionWithinEncryption, messageVersionWithinEncryptionLength = decodeVarint(
+decryptedData[readPosition:readPosition + 10])
+readPosition += messageVersionWithinEncryptionLength
+if messageVersionWithinEncryption != 1:
+logger.info('Cannot understand message versions other than one. Ignoring message.')
+return
+else:
+messageVersionWithinEncryptionLength = 0 # This variable can disappear after the protocol v3 upgrade period is complete.
 sendersAddressVersionNumber, sendersAddressVersionNumberLength = decodeVarint(
 decryptedData[readPosition:readPosition + 10])
 readPosition += sendersAddressVersionNumberLength
@@ -520,14 +489,17 @@ class objectProcessor(threading.Thread):
 readPosition += signatureLengthLength
 signature = decryptedData[
 readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(decryptedData[:positionOfBottomOfAckData], signature, pubSigningKey.encode('hex')):
-logger.debug('ECDSA verify failed')
-return
-logger.debug('ECDSA verify passed')
-except Exception as err:
-logger.debug('ECDSA verify failed %s' % err)
+if not msgObjectContainedVersion:
+# protocol v2. This can be removed after the end of the protocol v3 upgrade period.
+signedData = decryptedData[:positionOfBottomOfAckData]
+else:
+# protocol v3
+signedData = data[8:20] + encodeVarint(1) + encodeVarint(streamNumberAsClaimedByMsg) + decryptedData[:positionOfBottomOfAckData]
+
+if not highlevelcrypto.verify(signedData, signature, pubSigningKey.encode('hex')):
+logger.debug('ECDSA verify failed')
 return
+logger.debug('ECDSA verify passed')
 logger.debug('As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person: %s ..and here is the testnet address: %s. The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing.' %
 (helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey), helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey))
 )
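Which bytes the msg signature covers now depends on whether the object carried that outer version varint. A sketch of both layouts, mirroring the hunk above (encodeVarint is the project's encoder; the helper name is invented):

def msg_signed_data(data, decryptedData, bottomOfAckData, streamNumber, encodeVarint, containedVersion):
    if not containedVersion:
        # protocol v2: only the decrypted payload is signed
        return decryptedData[:bottomOfAckData]
    # protocol v3: expiresTime + objectType (data[8:20]), msg version 1,
    # the stream number, then the decrypted payload
    return data[8:20] + encodeVarint(1) + encodeVarint(streamNumber) + decryptedData[:bottomOfAckData]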
@@ -546,7 +518,7 @@ class objectProcessor(threading.Thread):
 '''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
 ripe.digest(),
 sendersAddressVersionNumber,
-'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF' + '\xFF\xFF\xFF\xFF' + decryptedData[messageVersionLength:endOfThePublicKeyPosition],
+'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF' + '\xFF\xFF\xFF\xFF' + decryptedData[messageVersionWithinEncryptionLength:endOfThePublicKeyPosition],
 int(time.time()),
 'yes')
 # This will check to see whether we happen to be awaiting this

@@ -558,7 +530,7 @@ class objectProcessor(threading.Thread):
 '''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
 ripe.digest(),
 sendersAddressVersionNumber,
-'\x00\x00\x00\x00\x00\x00\x00\x01' + decryptedData[messageVersionLength:endOfThePublicKeyPosition],
+'\x00\x00\x00\x00\x00\x00\x00\x01' + decryptedData[messageVersionWithinEncryptionLength:endOfThePublicKeyPosition],
 int(time.time()),
 'yes')
 # This will check to see whether we happen to be awaiting this

@@ -577,7 +549,7 @@ class objectProcessor(threading.Thread):
 requiredPayloadLengthExtraBytes = shared.config.getint(
 toAddress, 'payloadlengthextrabytes')
 if not shared.isProofOfWorkSufficient(data, requiredNonceTrialsPerByte, requiredPayloadLengthExtraBytes):
-print 'Proof of work in msg message insufficient only because it does not meet our higher requirement.'
+logger.info('Proof of work in msg is insufficient only because it does not meet our higher requirement.')
 return
 blockMessage = False # Gets set to True if the user shouldn't see the message according to black or white lists.
 if shared.config.get('bitmessagesettings', 'blackwhitelist') == 'black': # If we are using a blacklist
@@ -667,14 +639,7 @@ class objectProcessor(threading.Thread):
 shared.workerQueue.put(('sendbroadcast', ''))

 if self.ackDataHasAVaildHeader(ackData):
-if ackData[4:16] == addDataPadding('getpubkey'):
-shared.checkAndSharegetpubkeyWithPeers(ackData[24:])
-elif ackData[4:16] == addDataPadding('pubkey'):
-shared.checkAndSharePubkeyWithPeers(ackData[24:])
-elif ackData[4:16] == addDataPadding('msg'):
-shared.checkAndShareMsgWithPeers(ackData[24:])
-elif ackData[4:16] == addDataPadding('broadcast'):
-shared.checkAndShareBroadcastWithPeers(ackData[24:])
+shared.checkAndShareObjectWithPeers(ackData[24:])

 # Display timing data
 timeRequiredToAttemptToDecryptMessage = time.time(
@@ -696,155 +661,26 @@ class objectProcessor(threading.Thread):
 shared.UISignalQueue.put((
 'updateNumberOfBroadcastsProcessed', 'no data'))
 inventoryHash = calculateInventoryHash(data)
-readPosition = 8 # bypass the nonce
-embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-# This section is used for the transition from 32 bit time to 64 bit
-# time in the protocol.
-if embeddedTime == 0:
-embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-readPosition += 8
-else:
-readPosition += 4
-
+readPosition = 20 # bypass the nonce, time, and object type
 broadcastVersion, broadcastVersionLength = decodeVarint(
 data[readPosition:readPosition + 9])
 readPosition += broadcastVersionLength
-if broadcastVersion < 1 or broadcastVersion > 3:
-logger.debug('Cannot decode incoming broadcast versions higher than 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
+if broadcastVersion < 1 or broadcastVersion > 5:
+logger.info('Cannot decode incoming broadcast versions higher than 5. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
 return
 if broadcastVersion == 1:
-beginningOfPubkeyPosition = readPosition # used when we add the pubkey to our pubkey table
-sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
-data[readPosition:readPosition + 9])
-if sendersAddressVersion <= 1 or sendersAddressVersion >= 3:
-# Cannot decode senderAddressVersion higher than 2. Assuming
-# the sender isn\'t being silly, you should upgrade Bitmessage
-# because this message shall be ignored.
-return
-readPosition += sendersAddressVersionLength
-if sendersAddressVersion == 2:
-sendersStream, sendersStreamLength = decodeVarint(
-data[readPosition:readPosition + 9])
-readPosition += sendersStreamLength
-behaviorBitfield = data[readPosition:readPosition + 4]
-readPosition += 4
-sendersPubSigningKey = '\x04' + \
-data[readPosition:readPosition + 64]
-readPosition += 64
-sendersPubEncryptionKey = '\x04' + \
-data[readPosition:readPosition + 64]
-readPosition += 64
-endOfPubkeyPosition = readPosition
-sendersHash = data[readPosition:readPosition + 20]
-if sendersHash not in shared.broadcastSendersForWhichImWatching:
-# Display timing data
-logger.debug('Time spent deciding that we are not interested in this v1 broadcast: %s' % (time.time() - messageProcessingStartTime,))
-return
-# At this point, this message claims to be from sendersHash and
-# we are interested in it. We still have to hash the public key
-# to make sure it is truly the key that matches the hash, and
-# also check the signiture.
-readPosition += 20
-
-sha = hashlib.new('sha512')
-sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
-ripe = hashlib.new('ripemd160')
-ripe.update(sha.digest())
-if ripe.digest() != sendersHash:
-# The sender of this message lied.
-return
-messageEncodingType, messageEncodingTypeLength = decodeVarint(
-data[readPosition:readPosition + 9])
-if messageEncodingType == 0:
-return
-readPosition += messageEncodingTypeLength
-messageLength, messageLengthLength = decodeVarint(
-data[readPosition:readPosition + 9])
-readPosition += messageLengthLength
-message = data[readPosition:readPosition + messageLength]
-readPosition += messageLength
-readPositionAtBottomOfMessage = readPosition
-signatureLength, signatureLengthLength = decodeVarint(
-data[readPosition:readPosition + 9])
-readPosition += signatureLengthLength
-signature = data[readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(data[12:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
-logger.debug('ECDSA verify failed')
-return
-logger.debug('ECDSA verify passed')
-except Exception as err:
-logger.debug('ECDSA verify failed %s' % err)
-return
-# verify passed
-fromAddress = encodeAddress(
-sendersAddressVersion, sendersStream, ripe.digest())
-logger.debug('fromAddress: %s' % fromAddress)
-
-# Let's store the public key in case we want to reply to this person.
-# We don't have the correct nonce or time (which would let us
-# send out a pubkey message) so we'll just fill it with 1's. We
-# won't be able to send this pubkey to others (without doing
-# the proof of work ourselves, which this program is programmed
-# to not do.)
-sqlExecute(
-'''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
-ripe.digest(),
-sendersAddressVersion,
-'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF' + '\xFF\xFF\xFF\xFF' + data[beginningOfPubkeyPosition:endOfPubkeyPosition],
-int(time.time()),
-'yes')
-# This will check to see whether we happen to be awaiting this
-# pubkey in order to send a message. If we are, it will do the
-# POW and send it.
-self.possibleNewPubkey(ripe=ripe.digest())
-
-if messageEncodingType == 2:
-subject, body = decodeType2Message(message)
-logger.info('Broadcast subject (first 100 characters): %s' % repr(subject)[:100])
-elif messageEncodingType == 1:
-body = message
-subject = ''
-elif messageEncodingType == 0:
-logger.debug('messageEncodingType == 0. Doing nothing with the message.')
-else:
-body = 'Unknown encoding type.\n\n' + repr(message)
-subject = ''
-
-toAddress = '[Broadcast subscribers]'
-if messageEncodingType != 0:
-# Let us make sure that we haven't already received this message
-if helper_inbox.isMessageAlreadyInInbox(toAddress, fromAddress, subject, body, messageEncodingType):
-logger.info('This broadcast is already in our inbox. Ignoring it.')
-else:
-t = (inventoryHash, toAddress, fromAddress, subject, int(
-time.time()), body, 'inbox', messageEncodingType, 0)
-helper_inbox.insert(t)
-
-shared.UISignalQueue.put(('displayNewInboxMessage', (
-inventoryHash, toAddress, fromAddress, subject, body)))
-
-# If we are behaving as an API then we might need to run an
-# outside command to let some program know that a new
-# message has arrived.
-if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
-try:
-apiNotifyPath = shared.config.get(
-'bitmessagesettings', 'apinotifypath')
-except:
-apiNotifyPath = ''
-if apiNotifyPath != '':
-call([apiNotifyPath, "newBroadcast"])
-
-# Display timing data
-logger.debug('Time spent processing this interesting broadcast: %s' % (time.time() - messageProcessingStartTime,))
-
-if broadcastVersion == 2:
+logger.info('Version 1 broadcasts are no longer supported. Not processing it at all.')
+if broadcastVersion in [2,4]:
+"""
+v2 (and later v4) broadcasts are encrypted the same way the msgs were encrypted. To see if we are interested in a
+v2 broadcast, we try to decrypt it. This was replaced with v3 (and later v5) broadcasts which include a tag which
+we check instead, just like we do with v4 pubkeys.
+v2 and v3 broadcasts should be completely obsolete after the protocol v3 upgrade period and some code can be simplified.
+"""
 cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
 data[readPosition:readPosition + 10])
 readPosition += cleartextStreamNumberLength
+signedData = data[8:readPosition] # This doesn't end up being used if the broadcastVersion is 2
 initialDecryptionSuccessful = False
 for key, cryptorObject in shared.MyECSubscriptionCryptorObjects.items():
 try:
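Taken together, the broadcast hunks in this file route on the broadcast version: v1 is dropped, v2/v4 are discovered by trial decryption, and v3/v5 are looked up by tag. A purely illustrative summary of that routing:

def broadcast_routing(broadcastVersion):
    if broadcastVersion == 1:
        return 'unsupported'    # v1 broadcasts are no longer processed
    if broadcastVersion in (2, 4):
        return 'trial-decrypt'  # try every subscription key, as with msgs
    if broadcastVersion in (3, 5):
        return 'tag-routed'     # check the 32-byte tag before decrypting
    return 'unknown'            # outside the 1..5 range accepted above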
@@ -862,9 +698,13 @@ class objectProcessor(threading.Thread):
 return
 # At this point this is a broadcast I have decrypted and thus am
 # interested in.
-signedBroadcastVersion, readPosition = decodeVarint(
-decryptedData[:10])
-beginningOfPubkeyPosition = readPosition # used when we add the pubkey to our pubkey table
+readPosition = 0
+if broadcastVersion == 2:
+signedBroadcastVersion, signedBroadcastVersionLength = decodeVarint(
+decryptedData[:10])
+readPosition += signedBroadcastVersionLength
+
+beginningOfPubkeyPosition = readPosition # used when we add the pubkey to our pubkey table. This variable can be disposed of after the protocol v3 upgrade period because it will necessarily be at the beginning of the decryptedData; ie it will definitely equal 0
 sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
 decryptedData[readPosition:readPosition + 9])
 if sendersAddressVersion < 2 or sendersAddressVersion > 3:

@@ -920,14 +760,14 @@ class objectProcessor(threading.Thread):
 readPosition += signatureLengthLength
 signature = decryptedData[
 readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(decryptedData[:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
-logger.debug('ECDSA verify failed')
-return
-logger.debug('ECDSA verify passed')
-except Exception as err:
-logger.debug('ECDSA verify failed %s' % err)
+if broadcastVersion == 2: # this can be removed after the protocol v3 upgrade period
+signedData = decryptedData[:readPositionAtBottomOfMessage]
+else:
+signedData += decryptedData[:readPositionAtBottomOfMessage]
+if not highlevelcrypto.verify(signedData, signature, sendersPubSigningKey.encode('hex')):
+logger.debug('ECDSA verify failed')
 return
+logger.debug('ECDSA verify passed')
 # verify passed

 # Let's store the public key in case we want to reply to this
@@ -988,7 +828,8 @@ class objectProcessor(threading.Thread):
 # Display timing data
 logger.info('Time spent processing this interesting broadcast: %s' % (time.time() - messageProcessingStartTime,))

-if broadcastVersion == 3:
+if broadcastVersion in [3,5]:
+# broadcast version 3 should be completely obsolete after the end of the protocol v3 upgrade period
 cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
 data[readPosition:readPosition + 10])
 readPosition += cleartextStreamNumberLength

@@ -998,21 +839,28 @@ class objectProcessor(threading.Thread):
 logger.debug('We\'re not interested in this broadcast.')
 return
 # We are interested in this broadcast because of its tag.
+signedData = data[8:readPosition] # We're going to add some more data which is signed further down.
 cryptorObject = shared.MyECSubscriptionCryptorObjects[embeddedTag]
 try:
 decryptedData = cryptorObject.decrypt(data[readPosition:])
 logger.debug('EC decryption successful')
 except Exception as err:
-logger.debug('Broadcast version 3 decryption Unsuccessful.')
+logger.debug('Broadcast version %s decryption Unsuccessful.' % broadcastVersion)
 return

-signedBroadcastVersion, readPosition = decodeVarint(
-decryptedData[:10])
+# broadcast version 3 includes the broadcast version at the beginning
+# of the decryptedData. Broadcast version 5 doesn't.
+readPosition = 0
+if broadcastVersion == 3: # This section can be removed after the protocol v3 upgrade period
+signedBroadcastVersion, signedBroadcastVersionLength = decodeVarint(
+decryptedData[:10])
+readPosition += signedBroadcastVersionLength
+
 beginningOfPubkeyPosition = readPosition # used when we add the pubkey to our pubkey table
 sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
 decryptedData[readPosition:readPosition + 9])
 if sendersAddressVersion < 4:
-logger.info('Cannot decode senderAddressVersion less than 4 for broadcast version number 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
+logger.info('Cannot decode senderAddressVersion less than 4 for broadcast version number 3 or 4. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
 return
 readPosition += sendersAddressVersionLength
 sendersStream, sendersStreamLength = decodeVarint(

@@ -1067,14 +915,14 @@ class objectProcessor(threading.Thread):
 readPosition += signatureLengthLength
 signature = decryptedData[
 readPosition:readPosition + signatureLength]
-try:
-if not highlevelcrypto.verify(decryptedData[:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
-logger.debug('ECDSA verify failed')
-return
-logger.debug('ECDSA verify passed')
-except Exception as err:
-logger.debug('ECDSA verify failed %s' % err)
+if broadcastVersion == 3: # broadcastVersion 3 should be completely unused after the end of the protocol v3 upgrade period
+signedData = decryptedData[:readPositionAtBottomOfMessage]
+elif broadcastVersion == 5:
+signedData += decryptedData[:readPositionAtBottomOfMessage]
+if not highlevelcrypto.verify(signedData, signature, sendersPubSigningKey.encode('hex')):
+logger.debug('ECDSA verify failed')
 return
+logger.debug('ECDSA verify passed')
 # verify passed

 fromAddress = encodeAddress(
@@ -1168,23 +1016,27 @@ class objectProcessor(threading.Thread):
 logger.info('The length of ackData is unreasonably short. Not sending ackData.')
 return False

-magic,command,payload_length,checksum = shared.Header.unpack(ackData[:shared.Header.size])
+magic,command,payloadLength,checksum = shared.Header.unpack(ackData[:shared.Header.size])
 if magic != 0xE9BEB4D9:
 logger.info('Ackdata magic bytes were wrong. Not sending ackData.')
 return False
 payload = ackData[shared.Header.size:]
-if len(payload) != payload_length:
+if len(payload) != payloadLength:
 logger.info('ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.')
 return False
-if payload_length > 180000000: # If the size of the message is greater than 180MB, ignore it.
+if payloadLength > 1600100: # ~1.6 MB which is the maximum possible size of an inv message.
+"""
+The largest message should be either an inv or a getdata message at 1.6 MB in size.
+That doesn't mean that the object may be that big. The
+shared.checkAndShareObjectWithPeers function will verify that it is no larger than
+2^18 bytes.
+"""
 return False
 if checksum != hashlib.sha512(payload).digest()[0:4]: # test the checksum in the message.
 logger.info('ackdata checksum wrong. Not sending ackdata.')
 return False
-if (command != addDataPadding('getpubkey') and
-command != addDataPadding('pubkey') and
-command != addDataPadding('msg') and
-command != addDataPadding('broadcast')):
+command = command.rstrip('\x00')
+if command != 'object':
 return False
 return True
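The ack header check above now accepts only an 'object' command, caps the payload at roughly 1.6 MB, and keeps the magic-byte and SHA-512 checksum tests. A hedged sketch of the same checks in one function; the header layout is assumed to be struct.Struct('!L12sL4s') (4-byte magic, 12-byte command, 4-byte length, 4-byte checksum), which matches the 24-byte offset used elsewhere in this commit:

import hashlib
from struct import Struct

Header = Struct('!L12sL4s')  # assumed layout; see note above

def ack_header_is_valid(ackData):
    if len(ackData) < Header.size:
        return False
    magic, command, payloadLength, checksum = Header.unpack(ackData[:Header.size])
    payload = ackData[Header.size:]
    if magic != 0xE9BEB4D9:
        return False
    if len(payload) != payloadLength or payloadLength > 1600100:
        return False
    if checksum != hashlib.sha512(payload).digest()[:4]:
        return False
    return command.rstrip(b'\x00') == b'object'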
@@ -64,8 +64,6 @@ class outgoingSynSender(threading.Thread):
 shared.alreadyAttemptedConnectionsListLock.acquire()
 shared.alreadyAttemptedConnectionsList[peer] = 0
 shared.alreadyAttemptedConnectionsListLock.release()
-timeNodeLastSeen = shared.knownNodes[
-self.streamNumber][peer]
 if peer.host.find(':') == -1:
 address_family = socket.AF_INET
 else:

@@ -83,7 +81,10 @@ class outgoingSynSender(threading.Thread):
 So let us remove the offending address from our knownNodes file.
 """
 shared.knownNodesLock.acquire()
-del shared.knownNodes[self.streamNumber][peer]
+try:
+del shared.knownNodes[self.streamNumber][peer]
+except:
+pass
 shared.knownNodesLock.release()
 with shared.printLock:
 print 'deleting ', peer, 'from shared.knownNodes because it caused a socks.socksocket exception. We must not be 64-bit compatible.'
@ -169,14 +170,24 @@ class outgoingSynSender(threading.Thread):
                with shared.printLock:
                    print 'Could NOT connect to', peer, 'during outgoing attempt.', err

-               timeLastSeen = shared.knownNodes[
-                   self.streamNumber][peer]
-               if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the shared.knownNodes data-structure.
-                   shared.knownNodesLock.acquire()
-                   del shared.knownNodes[self.streamNumber][peer]
-                   shared.knownNodesLock.release()
+               deletedPeer = None
+               with shared.knownNodesLock:
+                   """
+                   It is remotely possible that peer is no longer in shared.knownNodes.
+                   This could happen if two outgoingSynSender threads both try to
+                   connect to the same peer, both fail, and then both try to remove
+                   it from shared.knownNodes. This is unlikely because of the
+                   alreadyAttemptedConnectionsList but because we clear that list once
+                   every half hour, it can happen.
+                   """
+                   if peer in shared.knownNodes[self.streamNumber]:
+                       timeLastSeen = shared.knownNodes[self.streamNumber][peer]
+                       if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the shared.knownNodes data-structure.
+                           del shared.knownNodes[self.streamNumber][peer]
+                           deletedPeer = peer
+               if deletedPeer:
                    with shared.printLock:
-                       print 'deleting ', peer, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
+                       print 'deleting', peer, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'

            except socks.Socks5AuthError as err:
                shared.UISignalQueue.put((
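Note: the docstring above describes a classic check-then-act race. The fix is to re-check membership while holding the lock and only report the deletion afterwards; a minimal stand-alone sketch of that pattern (names are illustrative, not the project's API):

    import threading
    import time

    knownNodes = {1: {}}                  # streamNumber -> {peer: lastSeenTimestamp}
    knownNodesLock = threading.Lock()

    def forget_stale_peer(streamNumber, peer, maxAge=172800, minListSize=1000):
        """Delete peer only if it is still present and stale; return it if deleted."""
        deletedPeer = None
        with knownNodesLock:
            nodes = knownNodes[streamNumber]
            if peer in nodes:             # re-check under the lock to avoid the race
                if int(time.time()) - nodes[peer] > maxAge and len(nodes) > minListSize:
                    del nodes[peer]
                    deletedPeer = peer
        return deletedPeer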
@ -195,14 +206,24 @@ class outgoingSynSender(threading.Thread):
|
||||||
with shared.printLock:
|
with shared.printLock:
|
||||||
print 'Could NOT connect to', peer, 'during outgoing attempt.', err
|
print 'Could NOT connect to', peer, 'during outgoing attempt.', err
|
||||||
|
|
||||||
timeLastSeen = shared.knownNodes[
|
deletedPeer = None
|
||||||
self.streamNumber][peer]
|
with shared.knownNodesLock:
|
||||||
if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the knownNodes data-structure.
|
"""
|
||||||
shared.knownNodesLock.acquire()
|
It is remotely possible that peer is no longer in shared.knownNodes.
|
||||||
del shared.knownNodes[self.streamNumber][peer]
|
This could happen if two outgoingSynSender threads both try to
|
||||||
shared.knownNodesLock.release()
|
connect to the same peer, both fail, and then both try to remove
|
||||||
with shared.printLock:
|
it from shared.knownNodes. This is unlikely because of the
|
||||||
print 'deleting ', peer, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
|
alreadyAttemptedConnectionsList but because we clear that list once
|
||||||
|
every half hour, it can happen.
|
||||||
|
"""
|
||||||
|
if peer in shared.knownNodes[self.streamNumber]:
|
||||||
|
timeLastSeen = shared.knownNodes[self.streamNumber][peer]
|
||||||
|
if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # for nodes older than 48 hours old if we have more than 1000 hosts in our list, delete from the shared.knownNodes data-structure.
|
||||||
|
del shared.knownNodes[self.streamNumber][peer]
|
||||||
|
deletedPeer = peer
|
||||||
|
if deletedPeer:
|
||||||
|
with shared.printLock:
|
||||||
|
print 'deleting', peer, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
sys.stderr.write(
|
sys.stderr.write(
|
||||||
|
|
|
@ -8,6 +8,7 @@ import socket
|
||||||
import random
|
import random
|
||||||
from struct import unpack, pack
|
from struct import unpack, pack
|
||||||
import sys
|
import sys
|
||||||
|
import traceback
|
||||||
#import string
|
#import string
|
||||||
#from subprocess import call # used when the API must execute an outside program
|
#from subprocess import call # used when the API must execute an outside program
|
||||||
#from pyelliptic.openssl import OpenSSL
|
#from pyelliptic.openssl import OpenSSL
|
||||||
|
@ -21,7 +22,6 @@ from helper_generic import addDataPadding, isHostInPrivateIPRange
|
||||||
from helper_sql import *
|
from helper_sql import *
|
||||||
#import tr
|
#import tr
|
||||||
from debug import logger
|
from debug import logger
|
||||||
#from bitmessagemain import shared.lengthOfTimeToLeaveObjectsInInventory, shared.lengthOfTimeToHoldOnToAllPubkeys, shared.maximumAgeOfAnObjectThatIAmWillingToAccept, shared.maximumAgeOfObjectsThatIAdvertiseToOthers, shared.maximumAgeOfNodesThatIAdvertiseToOthers, shared.numberOfObjectsThatWeHaveYetToGetPerPeer, shared.neededPubkeys
|
|
||||||
|
|
||||||
# This thread is created either by the synSenderThread(for outgoing
|
# This thread is created either by the synSenderThread(for outgoing
|
||||||
# connections) or the singleListenerThread(for incoming connections).
|
# connections) or the singleListenerThread(for incoming connections).
|
||||||
|
@ -65,11 +65,25 @@ class receiveDataThread(threading.Thread):
            print 'receiveDataThread starting. ID', str(id(self)) + '. The size of the shared.connectedHostsList is now', len(shared.connectedHostsList)

        while True:
+           if shared.config.getint('bitmessagesettings', 'maxdownloadrate') == 0:
+               downloadRateLimitBytes = float("inf")
+           else:
+               downloadRateLimitBytes = shared.config.getint('bitmessagesettings', 'maxdownloadrate') * 1000
+           with shared.receiveDataLock:
+               while shared.numberOfBytesReceivedLastSecond >= downloadRateLimitBytes:
+                   if int(time.time()) == shared.lastTimeWeResetBytesReceived:
+                       # If it's still the same second that it was last time then sleep.
+                       time.sleep(0.3)
+                   else:
+                       # It's a new second. Let us clear the shared.numberOfBytesReceivedLastSecond.
+                       shared.lastTimeWeResetBytesReceived = int(time.time())
+                       shared.numberOfBytesReceivedLastSecond = 0
            dataLen = len(self.data)
            try:
-               dataRecv = self.sock.recv(4096)
+               dataRecv = self.sock.recv(1024)
                self.data += dataRecv
-               shared.numberOfBytesReceived += len(dataRecv)
+               shared.numberOfBytesReceived += len(dataRecv) # for the 'network status' UI tab. The UI clears this value whenever it updates.
+               shared.numberOfBytesReceivedLastSecond += len(dataRecv) # for the download rate limit
            except socket.timeout:
                with shared.printLock:
                    print 'Timeout occurred waiting for data from', self.peer, '. Closing receiveData thread. (ID:', str(id(self)) + ')'
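Note: the loop above is a simple fixed-window throttle — a per-second byte counter that is reset when the wall-clock second changes. A self-contained sketch of the same idea (the real code keeps its counters and lock in the shared module; the names here are illustrative):

    import threading
    import time

    downloadLock = threading.Lock()      # plays the role of shared.receiveDataLock
    bytesReceivedThisSecond = 0          # plays the role of shared.numberOfBytesReceivedLastSecond
    lastReset = int(time.time())         # plays the role of shared.lastTimeWeResetBytesReceived

    def throttle_download(max_bytes_per_second):
        """Block while this second's byte budget is exhausted (0 means unlimited)."""
        global bytesReceivedThisSecond, lastReset
        limit = float("inf") if max_bytes_per_second == 0 else max_bytes_per_second
        with downloadLock:
            while bytesReceivedThisSecond >= limit:
                if int(time.time()) == lastReset:
                    time.sleep(0.3)      # still inside the same one-second window
                else:
                    lastReset = int(time.time())
                    bytesReceivedThisSecond = 0

A receive loop would call throttle_download() before each sock.recv() and then add len(dataRecv) to the counter, which is what the patched thread does with its shared counters.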
@ -117,7 +131,7 @@ class receiveDataThread(threading.Thread):
|
||||||
if magic != 0xE9BEB4D9:
|
if magic != 0xE9BEB4D9:
|
||||||
self.data = ""
|
self.data = ""
|
||||||
return
|
return
|
||||||
if payloadLength > 20000000:
|
if payloadLength > 1600100: # ~1.6 MB which is the maximum possible size of an inv message.
|
||||||
logger.info('The incoming message, which we have not yet download, is too large. Ignoring it. (unfortunately there is no way to tell the other node to stop sending it except to disconnect.) Message size: %s' % payloadLength)
|
logger.info('The incoming message, which we have not yet download, is too large. Ignoring it. (unfortunately there is no way to tell the other node to stop sending it except to disconnect.) Message size: %s' % payloadLength)
|
||||||
self.data = self.data[payloadLength + shared.Header.size:]
|
self.data = self.data[payloadLength + shared.Header.size:]
|
||||||
del magic,command,payloadLength,checksum # we don't need these anymore and better to clean them now before the recursive call rather than after
|
del magic,command,payloadLength,checksum # we don't need these anymore and better to clean them now before the recursive call rather than after
|
||||||
|
@ -146,33 +160,32 @@ class receiveDataThread(threading.Thread):
        with shared.printLock:
            print 'remoteCommand', repr(command), ' from', self.peer

-       #TODO: Use a dispatcher here
-       if not self.connectionIsOrWasFullyEstablished:
-           if command == 'version':
-               self.recversion(payload)
-           elif command == 'verack':
-               self.recverack()
-       else:
-           if command == 'addr':
-               self.recaddr(payload)
-           elif command == 'getpubkey':
-               shared.checkAndSharegetpubkeyWithPeers(payload)
-           elif command == 'pubkey':
-               self.recpubkey(payload)
-           elif command == 'inv':
-               self.recinv(payload)
-           elif command == 'getdata':
-               self.recgetdata(payload)
-           elif command == 'msg':
-               self.recmsg(payload)
-           elif command == 'broadcast':
-               self.recbroadcast(payload)
-           elif command == 'ping':
-               self.sendpong(payload)
-           #elif command == 'pong':
-           #    pass
-           #elif command == 'alert':
-           #    pass
+       try:
+           #TODO: Use a dispatcher here
+           if command == 'error':
+               self.recerror(payload)
+           elif not self.connectionIsOrWasFullyEstablished:
+               if command == 'version':
+                   self.recversion(payload)
+               elif command == 'verack':
+                   self.recverack()
+           else:
+               if command == 'addr':
+                   self.recaddr(payload)
+               elif command == 'inv':
+                   self.recinv(payload)
+               elif command == 'getdata':
+                   self.recgetdata(payload)
+               elif command == 'object':
+                   self.recobject(payload)
+               elif command == 'ping':
+                   self.sendpong(payload)
+               #elif command == 'pong':
+               #    pass
+       except varintDecodeError as e:
+           logger.debug("There was a problem with a varint while processing a message from the wire. Some details: %s" % e)
+       except Exception as e:
+           logger.critical("Critical error in a receiveDataThread: \n%s" % traceback.format_exc())

        del payload
        self.data = self.data[payloadLength + shared.Header.size:] # take this message out and then process the next message
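Note: the TODO about a dispatcher that the new code keeps could eventually look something like the table below. This is purely a sketch of the idea, not part of this patch; the handler names are the methods used above:

    def dispatch_command(self, command, payload):
        if command == 'error':
            return self.recerror(payload)
        if not self.connectionIsOrWasFullyEstablished:
            handlers = {'version': self.recversion,
                        'verack': lambda _payload: self.recverack()}
        else:
            handlers = {'addr': self.recaddr,
                        'inv': self.recinv,
                        'getdata': self.recgetdata,
                        'object': self.recobject,
                        'ping': self.sendpong}
        handler = handlers.get(command)
        if handler is not None:
            handler(payload)          # unknown commands are simply ignored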
@ -273,12 +286,10 @@ class receiveDataThread(threading.Thread):
|
||||||
self.sendBigInv()
|
self.sendBigInv()
|
||||||
|
|
||||||
def sendBigInv(self):
|
def sendBigInv(self):
|
||||||
# Select all hashes which are younger than two days old and in this
|
# Select all hashes for objects in this stream.
|
||||||
# stream.
|
|
||||||
queryreturn = sqlQuery(
|
queryreturn = sqlQuery(
|
||||||
'''SELECT hash FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''',
|
'''SELECT hash FROM inventory WHERE expirestime>? and streamnumber=?''',
|
||||||
int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers,
|
int(time.time()),
|
||||||
int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys,
|
|
||||||
self.streamNumber)
|
self.streamNumber)
|
||||||
bigInvList = {}
|
bigInvList = {}
|
||||||
for row in queryreturn:
|
for row in queryreturn:
|
||||||
|
@ -290,8 +301,8 @@ class receiveDataThread(threading.Thread):
|
||||||
with shared.inventoryLock:
|
with shared.inventoryLock:
|
||||||
for hash, storedValue in shared.inventory.items():
|
for hash, storedValue in shared.inventory.items():
|
||||||
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
|
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
|
||||||
objectType, streamNumber, payload, receivedTime, tag = storedValue
|
objectType, streamNumber, payload, expiresTime, tag = storedValue
|
||||||
if streamNumber == self.streamNumber and receivedTime > int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers:
|
if streamNumber == self.streamNumber and expiresTime > int(time.time()):
|
||||||
bigInvList[hash] = 0
|
bigInvList[hash] = 0
|
||||||
numberOfObjectsInInvMessage = 0
|
numberOfObjectsInInvMessage = 0
|
||||||
payload = ''
|
payload = ''
|
||||||
|
@ -326,79 +337,59 @@ class receiveDataThread(threading.Thread):
        with shared.printLock:
            print 'Timing attack mitigation: Sleeping for', sleepTime, 'seconds.'
        time.sleep(sleepTime)

-   # We have received a broadcast message
-   def recbroadcast(self, data):
-       self.messageProcessingStartTime = time.time()
-       shared.checkAndShareBroadcastWithPeers(data)
+   def recerror(self, data):
        """
-       Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. Sleeping
-       will help guarantee that we can process messages faster than a remote
-       node can send them. If we fall behind, the attacker could observe that
-       we are slowing down the rate at which we request objects from the
+       The remote node has been polite enough to send you an error message.
+       """
+       fatalStatus, readPosition = decodeVarint(data[:10])
+       banTime, banTimeLength = decodeVarint(data[readPosition:readPosition+10])
+       readPosition += banTimeLength
+       inventoryVectorLength, inventoryVectorLengthLength = decodeVarint(data[readPosition:readPosition+10])
+       if inventoryVectorLength > 100:
+           return
+       readPosition += inventoryVectorLengthLength
+       inventoryVector = data[readPosition:readPosition+inventoryVectorLength]
+       readPosition += inventoryVectorLength
+       errorTextLength, errorTextLengthLength = decodeVarint(data[readPosition:readPosition+10])
+       if errorTextLength > 1000:
+           return
+       readPosition += errorTextLengthLength
+       errorText = data[readPosition:readPosition+errorTextLength]
+       if fatalStatus == 0:
+           fatalHumanFriendly = 'Warning'
+       elif fatalStatus == 1:
+           fatalHumanFriendly = 'Error'
+       elif fatalStatus == 2:
+           fatalHumanFriendly = 'Fatal'
+       message = '%s message received from %s: %s.' % (fatalHumanFriendly, self.peer, errorText)
+       if inventoryVector:
+           message += " This concerns object %s" % inventoryVector.encode('hex')
+       if banTime > 0:
+           message += " Remote node says that the ban time is %s" % banTime
+       logger.error(message)

+   def recobject(self, data):
+       self.messageProcessingStartTime = time.time()
+       lengthOfTimeWeShouldUseToProcessThisMessage = shared.checkAndShareObjectWithPeers(data)

+       """
+       Sleeping will help guarantee that we can process messages faster than a
+       remote node can send them. If we fall behind, the attacker could observe
+       that we are slowing down the rate at which we request objects from the
        network which would indicate that we own a particular address (whichever
        one to which they are sending all of their attack messages). Note
        that if an attacker connects to a target with many connections, this
        mitigation mechanism might not be sufficient.
        """
-       if len(data) > 100000000: # Size is greater than 100 megabytes
-           lengthOfTimeWeShouldUseToProcessThisMessage = 100 # seconds.
-       elif len(data) > 10000000: # Between 100 and 10 megabytes
-           lengthOfTimeWeShouldUseToProcessThisMessage = 20 # seconds.
-       elif len(data) > 1000000: # Between 10 and 1 megabyte
-           lengthOfTimeWeShouldUseToProcessThisMessage = 3 # seconds.
-       else: # Less than 1 megabyte
-           lengthOfTimeWeShouldUseToProcessThisMessage = .6 # seconds.
-       sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - \
-           (time.time() - self.messageProcessingStartTime)
-       self._sleepForTimingAttackMitigation(sleepTime)
+       sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time() - self.messageProcessingStartTime)

-   # We have received a msg message.
-   def recmsg(self, data):
-       self.messageProcessingStartTime = time.time()
-       shared.checkAndShareMsgWithPeers(data)
-       """
-       Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. Sleeping
-       will help guarantee that we can process messages faster than a remote
-       node can send them. If we fall behind, the attacker could observe that
-       we are slowing down the rate at which we request objects from the
-       network which would indicate that we own a particular address (whichever
-       one to which they are sending all of their attack messages). Note
-       that if an attacker connects to a target with many connections, this
-       mitigation mechanism might not be sufficient.
-       """
-       if len(data) > 100000000: # Size is greater than 100 megabytes
-           lengthOfTimeWeShouldUseToProcessThisMessage = 100 # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
-       elif len(data) > 10000000: # Between 100 and 10 megabytes
-           lengthOfTimeWeShouldUseToProcessThisMessage = 20 # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
-       elif len(data) > 1000000: # Between 10 and 1 megabyte
-           lengthOfTimeWeShouldUseToProcessThisMessage = 3 # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
-       else: # Less than 1 megabyte
-           lengthOfTimeWeShouldUseToProcessThisMessage = .6 # seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
-       sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - \
-           (time.time() - self.messageProcessingStartTime)
-       self._sleepForTimingAttackMitigation(sleepTime)

-   # We have received a pubkey
-   def recpubkey(self, data):
-       self.pubkeyProcessingStartTime = time.time()
-       shared.checkAndSharePubkeyWithPeers(data)
-       lengthOfTimeWeShouldUseToProcessThisMessage = .1
-       sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - \
-           (time.time() - self.pubkeyProcessingStartTime)
        self._sleepForTimingAttackMitigation(sleepTime)

    # We have received an inv message
    def recinv(self, data):
-       totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = 0 # this counts duplicates separately because they take up memory
+       totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers = 0 # this counts duplicates separately because they take up memory
        if len(shared.numberOfObjectsThatWeHaveYetToGetPerPeer) > 0:
            for key, value in shared.numberOfObjectsThatWeHaveYetToGetPerPeer.items():
                totalNumberOfobjectsThatWeHaveYetToGetFromAllPeers += value
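Note: recerror() above fixes the layout of the new protocol v3 'error' payload: a fatal-status varint, a ban-time varint, then a length-prefixed inventory vector and a length-prefixed error text. The sender's side of the same layout, for reference (a sketch; encodeVarint is the helper from the project's addresses module, and the real code builds these messages via shared.assembleErrorMessage as seen in the recversion changes below):

    from addresses import encodeVarint

    def assemble_error_payload(fatalStatus, banTime, inventoryVector, errorText):
        # fatalStatus: 0 = warning, 1 = error, 2 = fatal
        payload = encodeVarint(fatalStatus)
        payload += encodeVarint(banTime)              # seconds; 0 if no ban is implied
        payload += encodeVarint(len(inventoryVector))
        payload += inventoryVector                    # empty if no particular object is meant
        payload += encodeVarint(len(errorText))
        payload += errorText
        return payload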
@ -474,34 +465,27 @@ class receiveDataThread(threading.Thread):
            shared.numberOfInventoryLookupsPerformed += 1
            shared.inventoryLock.acquire()
            if hash in shared.inventory:
-               objectType, streamNumber, payload, receivedTime, tag = shared.inventory[
-                   hash]
+               objectType, streamNumber, payload, expiresTime, tag = shared.inventory[hash]
                shared.inventoryLock.release()
-               self.sendData(objectType, payload)
+               self.sendObject(payload)
            else:
                shared.inventoryLock.release()
                queryreturn = sqlQuery(
-                   '''select objecttype, payload from inventory where hash=?''',
-                   hash)
+                   '''select payload from inventory where hash=? and expirestime>=?''',
+                   hash,
+                   int(time.time()))
                if queryreturn != []:
                    for row in queryreturn:
-                       objectType, payload = row
-                       self.sendData(objectType, payload)
+                       payload, = row
+                       self.sendObject(payload)
                else:
-                   print 'Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn\'t have happened.'
+                   logger.warning('%s asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. We probably cleaned it out after advertising it but before they got around to asking for it.' % self.peer)

    # Our peer has requested (in a getdata message) that we send an object.
-   def sendData(self, objectType, payload):
-       if (objectType != 'pubkey' and
-           objectType != 'getpubkey' and
-           objectType != 'msg' and
-           objectType != 'broadcast'):
-           sys.stderr.write(
-               'Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
-           return
+   def sendObject(self, payload):
        with shared.printLock:
-           print 'sending', objectType
+           print 'sending an object.'
-       self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket(objectType, payload)))
+       self.sendDataThreadQueue.put((0, 'sendRawData', shared.CreatePacket('object',payload)))

    def _checkIPv4Address(self, host, hostFromAddrMessage):
@ -730,12 +714,25 @@ class receiveDataThread(threading.Thread):
            """
            return
        self.remoteProtocolVersion, = unpack('>L', data[:4])
-       if self.remoteProtocolVersion <= 1:
+       if self.remoteProtocolVersion < 3:
            self.sendDataThreadQueue.put((0, 'shutdown','no data'))
            with shared.printLock:
-               print 'Closing connection to old protocol version 1 node: ', self.peer
+               print 'Closing connection to old protocol version', self.remoteProtocolVersion, 'node: ', self.peer
            return
-       # print 'remoteProtocolVersion', self.remoteProtocolVersion
+       timestamp, = unpack('>Q', data[12:20])
+       timeOffset = timestamp - int(time.time())
+       if timeOffset > 3600:
+           self.sendDataThreadQueue.put((0, 'sendRawData', shared.assembleErrorMessage(fatal=2, errorText="Your time is too far in the future compared to mine. Closing connection.")))
+           logger.info("%s's time is too far in the future (%s seconds). Closing connection to it." % (self.peer, timeOffset))
+           time.sleep(2)
+           self.sendDataThreadQueue.put((0, 'shutdown','no data'))
+           return
+       if timeOffset < -3600:
+           self.sendDataThreadQueue.put((0, 'sendRawData', shared.assembleErrorMessage(fatal=2, errorText="Your time is too far in the past compared to mine. Closing connection.")))
+           logger.info("%s's time is too far in the past (timeOffset %s seconds). Closing connection to it." % (self.peer, timeOffset))
+           time.sleep(2)
+           self.sendDataThreadQueue.put((0, 'shutdown','no data'))
+           return
        self.myExternalIP = socket.inet_ntoa(data[40:44])
        # print 'myExternalIP', self.myExternalIP
        self.remoteNodeIncomingPort, = unpack('>H', data[70:72])
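Note: the new handshake code rejects peers whose clocks differ from ours by more than an hour in either direction. The check in isolation (a sketch; the 12:20 slice for the 8-byte timestamp is taken from the unpack call above):

    import time
    from struct import unpack

    MAX_TIME_OFFSET = 3600  # seconds

    def remote_clock_offset_ok(versionPayload):
        """Return (ok, offset) for the remote node's advertised time."""
        timestamp, = unpack('>Q', versionPayload[12:20])
        timeOffset = timestamp - int(time.time())
        return abs(timeOffset) <= MAX_TIME_OFFSET, timeOffset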
@ -751,7 +748,7 @@ class receiveDataThread(threading.Thread):
|
||||||
self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(
|
self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(
|
||||||
data[readPosition:])
|
data[readPosition:])
|
||||||
with shared.printLock:
|
with shared.printLock:
|
||||||
print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
|
print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber, ' time offset:', timeOffset, 'seconds.'
|
||||||
|
|
||||||
if self.streamNumber != 1:
|
if self.streamNumber != 1:
|
||||||
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
|
self.sendDataThreadQueue.put((0, 'shutdown','no data'))
|
||||||
|
@ -760,7 +757,7 @@ class receiveDataThread(threading.Thread):
|
||||||
return
|
return
|
||||||
shared.connectedHostsList[
|
shared.connectedHostsList[
|
||||||
self.peer.host] = 1 # We use this data structure to not only keep track of what hosts we are connected to so that we don't try to connect to them again, but also to list the connections count on the Network Status tab.
|
self.peer.host] = 1 # We use this data structure to not only keep track of what hosts we are connected to so that we don't try to connect to them again, but also to list the connections count on the Network Status tab.
|
||||||
# If this was an incoming connection, then the sendData thread
|
# If this was an incoming connection, then the sendDataThread
|
||||||
# doesn't know the stream. We have to set it.
|
# doesn't know the stream. We have to set it.
|
||||||
if not self.initiatedConnection:
|
if not self.initiatedConnection:
|
||||||
self.sendDataThreadQueue.put((0, 'setStreamNumber', self.streamNumber))
|
self.sendDataThreadQueue.put((0, 'setStreamNumber', self.streamNumber))
|
||||||
|
|
|
@ -62,9 +62,32 @@ class sendDataThread(threading.Thread):
        self.versionSent = 1

    def sendBytes(self, data):
-       self.sock.sendall(data)
-       shared.numberOfBytesSent += len(data)
-       self.lastTimeISentData = int(time.time())
+       if shared.config.getint('bitmessagesettings', 'maxuploadrate') == 0:
+           uploadRateLimitBytes = 999999999 # float("inf") doesn't work
+       else:
+           uploadRateLimitBytes = shared.config.getint('bitmessagesettings', 'maxuploadrate') * 1000
+       with shared.sendDataLock:
+           while data:
+               while shared.numberOfBytesSentLastSecond >= uploadRateLimitBytes:
+                   if int(time.time()) == shared.lastTimeWeResetBytesSent:
+                       time.sleep(0.3)
+                   else:
+                       # It's a new second. Let us clear the shared.numberOfBytesSentLastSecond
+                       shared.lastTimeWeResetBytesSent = int(time.time())
+                       shared.numberOfBytesSentLastSecond = 0
+                       # If the user raises or lowers the uploadRateLimit then we should make use of
+                       # the new setting. If we are hitting the limit then we'll check here about
+                       # once per second.
+                       if shared.config.getint('bitmessagesettings', 'maxuploadrate') == 0:
+                           uploadRateLimitBytes = 999999999 # float("inf") doesn't work
+                       else:
+                           uploadRateLimitBytes = shared.config.getint('bitmessagesettings', 'maxuploadrate') * 1000
+               numberOfBytesWeMaySend = uploadRateLimitBytes - shared.numberOfBytesSentLastSecond
+               self.sock.sendall(data[:numberOfBytesWeMaySend])
+               shared.numberOfBytesSent += len(data[:numberOfBytesWeMaySend]) # used for the 'network status' tab in the UI
+               shared.numberOfBytesSentLastSecond += len(data[:numberOfBytesWeMaySend])
+               self.lastTimeISentData = int(time.time())
+               data = data[numberOfBytesWeMaySend:]

    def run(self):
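Note: sendBytes() now slices the outgoing buffer so that at most one second's allowance leaves the socket at a time. The same loop reduced to a free function (a sketch; the real code keeps its counters in the shared module and uses 999999999 instead of float("inf") because the allowance is used directly as a slice bound, which this sketch guards instead):

    import time

    def send_throttled(sock, data, max_bytes_per_second):
        """Send data in chunks, at most max_bytes_per_second per one-second window (0 = unlimited)."""
        limit = float("inf") if max_bytes_per_second == 0 else max_bytes_per_second
        window_start = int(time.time())
        sent_this_second = 0
        while data:
            if int(time.time()) != window_start:
                window_start = int(time.time())
                sent_this_second = 0
            if sent_this_second >= limit:
                time.sleep(0.3)
                continue
            allowance = len(data) if limit == float("inf") else int(limit - sent_this_second)
            chunk = data[:allowance]
            sock.sendall(chunk)
            sent_this_second += len(chunk)
            data = data[len(chunk):]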
@ -96,7 +119,7 @@ class sendDataThread(threading.Thread):
|
||||||
elif command == 'advertisepeer':
|
elif command == 'advertisepeer':
|
||||||
self.objectHashHolderInstance.holdPeer(data)
|
self.objectHashHolderInstance.holdPeer(data)
|
||||||
elif command == 'sendaddr':
|
elif command == 'sendaddr':
|
||||||
if self.connectionIsOrWasFullyEstablished: # only send addr messages if we have send and heard a verack from the remote node
|
if self.connectionIsOrWasFullyEstablished: # only send addr messages if we have sent and heard a verack from the remote node
|
||||||
numberOfAddressesInAddrMessage = len(data)
|
numberOfAddressesInAddrMessage = len(data)
|
||||||
payload = ''
|
payload = ''
|
||||||
for hostDetails in data:
|
for hostDetails in data:
|
||||||
|
|
|
@ -9,20 +9,23 @@ import tr#anslate
from helper_sql import *
from debug import logger

-'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
+"""
+The singleCleaner class is a timer-driven thread that cleans data structures
+to free memory, resends messages when a remote node doesn't respond, and
+sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
inventorySets (clears then reloads data out of sql database)

It cleans these tables on the disk:
-inventory (clears data more than 2 days and 12 hours old)
+inventory (clears expired objects)
pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)

It resends messages when there has been no response:
resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
resends msg messages in 5 days (then 10 days, then 20 days, etc...)
-'''
+"""

class singleCleaner(threading.Thread):
@ -41,22 +44,21 @@ class singleCleaner(threading.Thread):
        while True:
            shared.UISignalQueue.put((
                'updateStatusBar', 'Doing housekeeping (Flushing inventory in memory to disk...)'))

            with shared.inventoryLock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
                with SqlBulkExecute() as sql:
                    for hash, storedValue in shared.inventory.items():
-                       objectType, streamNumber, payload, receivedTime, tag = storedValue
-                       if int(time.time()) - 3600 > receivedTime:
-                           sql.execute(
-                               '''INSERT INTO inventory VALUES (?,?,?,?,?,?)''',
-                               hash,
-                               objectType,
-                               streamNumber,
-                               payload,
-                               receivedTime,
-                               tag)
-                           del shared.inventory[hash]
+                       objectType, streamNumber, payload, expiresTime, tag = storedValue
+                       sql.execute(
+                           '''INSERT INTO inventory VALUES (?,?,?,?,?,?)''',
+                           hash,
+                           objectType,
+                           streamNumber,
+                           payload,
+                           expiresTime,
+                           tag)
+                       del shared.inventory[hash]
            shared.UISignalQueue.put(('updateStatusBar', ''))

            shared.broadcastToSendDataQueues((
                0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
            # If we are running as a daemon then we are going to fill up the UI
@ -66,15 +68,9 @@ class singleCleaner(threading.Thread):
                shared.UISignalQueue.queue.clear()
            if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
-               # inventory (moves data from the inventory data structure to
-               # the on-disk sql database)
-               # inventory (clears pubkeys after 28 days and everything else
-               # after 2 days and 12 hours)
                sqlExecute(
-                   '''DELETE FROM inventory WHERE (receivedtime<? AND objecttype<>'pubkey') OR (receivedtime<? AND objecttype='pubkey') ''',
-                   int(time.time()) - shared.lengthOfTimeToLeaveObjectsInInventory,
-                   int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)
+                   '''DELETE FROM inventory WHERE expirestime<? ''',
+                   int(time.time()) - (60 * 60 * 3))

                # pubkeys
                sqlExecute(
                    '''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''',
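Note: together with the flush loop in the previous hunk, the cleaner now works purely off an expirestime column: objects are flushed from memory to disk as-is, and rows are purged once they have been expired for about three hours. A throwaway sqlite3 sketch of that cycle (column names follow the queries above; everything else is illustrative):

    import sqlite3
    import time

    conn = sqlite3.connect(':memory:')
    conn.execute('''CREATE TABLE inventory
                    (hash blob, objecttype int, streamnumber int,
                     payload blob, expirestime integer, tag blob)''')

    def flush_inventory(memInventory):
        with conn:
            for objHash, (objectType, streamNumber, payload, expiresTime, tag) in memInventory.items():
                conn.execute('INSERT INTO inventory VALUES (?,?,?,?,?,?)',
                             (objHash, objectType, streamNumber, payload, expiresTime, tag))
            memInventory.clear()

    def delete_expired(gracePeriod=60 * 60 * 3):
        with conn:
            conn.execute('DELETE FROM inventory WHERE expirestime<?',
                         (int(time.time()) - gracePeriod,))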
@ -89,7 +85,6 @@ class singleCleaner(threading.Thread):
|
||||||
sys.stderr.write(
|
sys.stderr.write(
|
||||||
'Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
|
'Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
|
||||||
time.sleep(3)
|
time.sleep(3)
|
||||||
|
|
||||||
break
|
break
|
||||||
toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
|
toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
|
||||||
if status == 'awaitingpubkey':
|
if status == 'awaitingpubkey':
|
||||||
|
@ -108,9 +103,10 @@ class singleCleaner(threading.Thread):
                shared.inventorySets[streamNumber].add(row[0])
            with shared.inventoryLock:
                for hash, storedValue in shared.inventory.items():
-                   objectType, streamNumber, payload, receivedTime, tag = storedValue
-                   if streamNumber in shared.inventorySets:
-                       shared.inventorySets[streamNumber].add(hash)
+                   objectType, streamNumber, payload, expiresTime, tag = storedValue
+                   if not streamNumber in shared.inventorySets:
+                       shared.inventorySets[streamNumber] = set()
+                   shared.inventorySets[streamNumber].add(hash)

            # Let us write out the knowNodes to disk if there is anything new to write out.
            if shared.needToWriteKnownNodesToDisk:
@ -88,7 +88,7 @@ class singleListener(threading.Thread):
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
a, sockaddr = sock.accept()
|
socketObject, sockaddr = sock.accept()
|
||||||
(HOST, PORT) = sockaddr[0:2]
|
(HOST, PORT) = sockaddr[0:2]
|
||||||
|
|
||||||
# If the address is an IPv4-mapped IPv6 address then
|
# If the address is an IPv4-mapped IPv6 address then
|
||||||
|
@ -103,7 +103,7 @@ class singleListener(threading.Thread):
|
||||||
# share the same external IP. This is here to prevent
|
# share the same external IP. This is here to prevent
|
||||||
# connection flooding.
|
# connection flooding.
|
||||||
if HOST in shared.connectedHostsList:
|
if HOST in shared.connectedHostsList:
|
||||||
a.close()
|
socketObject.close()
|
||||||
with shared.printLock:
|
with shared.printLock:
|
||||||
print 'We are already connected to', HOST + '. Ignoring connection.'
|
print 'We are already connected to', HOST + '. Ignoring connection.'
|
||||||
else:
|
else:
|
||||||
|
@ -111,17 +111,17 @@ class singleListener(threading.Thread):
|
||||||
|
|
||||||
someObjectsOfWhichThisRemoteNodeIsAlreadyAware = {} # This is not necessairly a complete list; we clear it from time to time to save memory.
|
someObjectsOfWhichThisRemoteNodeIsAlreadyAware = {} # This is not necessairly a complete list; we clear it from time to time to save memory.
|
||||||
sendDataThreadQueue = Queue.Queue() # Used to submit information to the send data thread for this connection.
|
sendDataThreadQueue = Queue.Queue() # Used to submit information to the send data thread for this connection.
|
||||||
a.settimeout(20)
|
socketObject.settimeout(20)
|
||||||
|
|
||||||
sd = sendDataThread(sendDataThreadQueue)
|
sd = sendDataThread(sendDataThreadQueue)
|
||||||
sd.setup(
|
sd.setup(
|
||||||
a, HOST, PORT, -1, someObjectsOfWhichThisRemoteNodeIsAlreadyAware)
|
socketObject, HOST, PORT, -1, someObjectsOfWhichThisRemoteNodeIsAlreadyAware)
|
||||||
sd.start()
|
sd.start()
|
||||||
|
|
||||||
rd = receiveDataThread()
|
rd = receiveDataThread()
|
||||||
rd.daemon = True # close the main program even if there are threads left
|
rd.daemon = True # close the main program even if there are threads left
|
||||||
rd.setup(
|
rd.setup(
|
||||||
a, HOST, PORT, -1, someObjectsOfWhichThisRemoteNodeIsAlreadyAware, self.selfInitiatedConnections, sendDataThreadQueue)
|
socketObject, HOST, PORT, -1, someObjectsOfWhichThisRemoteNodeIsAlreadyAware, self.selfInitiatedConnections, sendDataThreadQueue)
|
||||||
rd.start()
|
rd.start()
|
||||||
|
|
||||||
with shared.printLock:
|
with shared.printLock:
|
||||||
|
|
|
@ -27,21 +27,20 @@ class singleWorker(threading.Thread):

    def run(self):
        queryreturn = sqlQuery(
-           '''SELECT toripe, toaddress FROM sent WHERE ((status='awaitingpubkey' OR status='doingpubkeypow') AND folder='sent')''')
+           '''SELECT DISTINCT toaddress FROM sent WHERE (status='awaitingpubkey' AND folder='sent')''')
        for row in queryreturn:
-           toripe, toaddress = row
-           toStatus, toAddressVersionNumber, toStreamNumber, toRipe = decodeAddress(toaddress)
+           toAddress, = row
+           toStatus, toAddressVersionNumber, toStreamNumber, toRipe = decodeAddress(toAddress)
            if toAddressVersionNumber <= 3 :
-               shared.neededPubkeys[toripe] = 0
+               shared.neededPubkeys[toRipe] = 0
            elif toAddressVersionNumber >= 4:
                doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
                    toAddressVersionNumber) + encodeVarint(toStreamNumber) + toRipe).digest()).digest()
                privEncryptionKey = doubleHashOfAddressData[:32] # Note that this is the first half of the sha512 hash.
                tag = doubleHashOfAddressData[32:]
-               shared.neededPubkeys[tag] = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex')) # We'll need this for when we receive a pubkey reply: it will be encrypted and we'll need to decrypt it.
+               shared.neededPubkeys[tag] = (toAddress, highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))) # We'll need this for when we receive a pubkey reply: it will be encrypted and we'll need to decrypt it.

-       # Initialize the shared.ackdataForWhichImWatching data structure using data
-       # from the sql database.
+       # Initialize the shared.ackdataForWhichImWatching data structure
        queryreturn = sqlQuery(
            '''SELECT ackdata FROM sent where (status='msgsent' OR status='doingmsgpow')''')
        for row in queryreturn:
|
||||||
myAddress = shared.myAddressesByHash[hash]
|
myAddress = shared.myAddressesByHash[hash]
|
||||||
status, addressVersionNumber, streamNumber, hash = decodeAddress(
|
status, addressVersionNumber, streamNumber, hash = decodeAddress(
|
||||||
myAddress)
|
myAddress)
|
||||||
embeddedTime = int(time.time() + random.randrange(
|
|
||||||
-300, 300)) # the current time plus or minus five minutes
|
TTL = 28 * 24 * 60 * 60 # 28 days
|
||||||
payload = pack('>I', (embeddedTime))
|
embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL) # 28 days from now plus or minus five minutes
|
||||||
|
payload = pack('>Q', (embeddedTime))
|
||||||
|
payload += '\x00\x00\x00\x01' # object type: pubkey
|
||||||
payload += encodeVarint(addressVersionNumber) # Address version number
|
payload += encodeVarint(addressVersionNumber) # Address version number
|
||||||
payload += encodeVarint(streamNumber)
|
payload += encodeVarint(streamNumber)
|
||||||
payload += '\x00\x00\x00\x01' # bitfield of features supported by me (see the wiki).
|
payload += '\x00\x00\x00\x01' # bitfield of features supported by me (see the wiki).
|
||||||
|
@ -112,7 +113,6 @@ class singleWorker(threading.Thread):
|
||||||
with shared.printLock:
|
with shared.printLock:
|
||||||
sys.stderr.write(
|
sys.stderr.write(
|
||||||
'Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
|
'Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s\n' % err)
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
privSigningKeyHex = shared.decodeWalletImportFormat(
|
privSigningKeyHex = shared.decodeWalletImportFormat(
|
||||||
|
@ -128,8 +128,7 @@ class singleWorker(threading.Thread):
|
||||||
payload += pubEncryptionKey[1:]
|
payload += pubEncryptionKey[1:]
|
||||||
|
|
||||||
# Do the POW for this pubkey message
|
# Do the POW for this pubkey message
|
||||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
|
||||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
|
||||||
print '(For pubkey message) Doing proof of work...'
|
print '(For pubkey message) Doing proof of work...'
|
||||||
initialHash = hashlib.sha512(payload).digest()
|
initialHash = hashlib.sha512(payload).digest()
|
||||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||||
|
@ -137,7 +136,7 @@ class singleWorker(threading.Thread):
|
||||||
payload = pack('>Q', nonce) + payload
|
payload = pack('>Q', nonce) + payload
|
||||||
|
|
||||||
inventoryHash = calculateInventoryHash(payload)
|
inventoryHash = calculateInventoryHash(payload)
|
||||||
objectType = 'pubkey'
|
objectType = 1
|
||||||
shared.inventory[inventoryHash] = (
|
shared.inventory[inventoryHash] = (
|
||||||
objectType, streamNumber, payload, embeddedTime,'')
|
objectType, streamNumber, payload, embeddedTime,'')
|
||||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||||
|
@ -174,9 +173,19 @@ class singleWorker(threading.Thread):
            return
        status, addressVersionNumber, streamNumber, hash = decodeAddress(
            myAddress)
-       embeddedTime = int(time.time() + random.randrange(
-           -300, 300)) # the current time plus or minus five minutes
-       payload = pack('>I', (embeddedTime))
+       TTL = 28 * 24 * 60 * 60 # 28 days
+       embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL) # 28 days from now plus or minus five minutes
+       signedTimeForProtocolV2 = embeddedTime - TTL
+       """
+       According to the protocol specification, the expiresTime along with the pubkey information is
+       signed. But to be backwards compatible during the upgrade period, we shall sign not the
+       expiresTime but rather the current time. There must be precisely a 28 day difference
+       between the two. After the upgrade period we'll switch to signing the whole payload with the
+       expiresTime time.
+       """
+       payload = pack('>Q', (embeddedTime))
+       payload += '\x00\x00\x00\x01' # object type: pubkey
        payload += encodeVarint(addressVersionNumber) # Address version number
        payload += encodeVarint(streamNumber)
        payload += '\x00\x00\x00\x01' # bitfield of features supported by me (see the wiki).
|
||||||
myAddress, 'noncetrialsperbyte'))
|
myAddress, 'noncetrialsperbyte'))
|
||||||
payload += encodeVarint(shared.config.getint(
|
payload += encodeVarint(shared.config.getint(
|
||||||
myAddress, 'payloadlengthextrabytes'))
|
myAddress, 'payloadlengthextrabytes'))
|
||||||
signature = highlevelcrypto.sign(payload, privSigningKeyHex)
|
|
||||||
|
if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
|
||||||
|
signedData = pack('>I', signedTimeForProtocolV2) + payload[12:]
|
||||||
|
else:
|
||||||
|
signedData = payload
|
||||||
|
|
||||||
|
signature = highlevelcrypto.sign(signedData, privSigningKeyHex)
|
||||||
payload += encodeVarint(len(signature))
|
payload += encodeVarint(len(signature))
|
||||||
payload += signature
|
payload += signature
|
||||||
|
|
||||||
# Do the POW for this pubkey message
|
# Do the POW for this pubkey message
|
||||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
|
||||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
with shared.printLock:
|
||||||
print '(For pubkey message) Doing proof of work...'
|
print '(For pubkey message) Doing proof of work...'
|
||||||
initialHash = hashlib.sha512(payload).digest()
|
initialHash = hashlib.sha512(payload).digest()
|
||||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||||
print '(For pubkey message) Found proof of work', trialValue, 'Nonce:', nonce
|
with shared.printLock:
|
||||||
|
print '(For pubkey message) Found proof of work. Nonce:', nonce
|
||||||
|
|
||||||
payload = pack('>Q', nonce) + payload
|
payload = pack('>Q', nonce) + payload
|
||||||
inventoryHash = calculateInventoryHash(payload)
|
inventoryHash = calculateInventoryHash(payload)
|
||||||
objectType = 'pubkey'
|
objectType = 1
|
||||||
shared.inventory[inventoryHash] = (
|
shared.inventory[inventoryHash] = (
|
||||||
objectType, streamNumber, payload, embeddedTime,'')
|
objectType, streamNumber, payload, embeddedTime,'')
|
||||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||||
|
@ -256,9 +272,11 @@ class singleWorker(threading.Thread):
|
||||||
return
|
return
|
||||||
status, addressVersionNumber, streamNumber, hash = decodeAddress(
|
status, addressVersionNumber, streamNumber, hash = decodeAddress(
|
||||||
myAddress)
|
myAddress)
|
||||||
embeddedTime = int(time.time() + random.randrange(
|
|
||||||
-300, 300)) # the current time plus or minus five minutes
|
TTL = 28 * 24 * 60 * 60 # 28 days
|
||||||
|
embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL) # 28 days from now plus or minus five minutes
|
||||||
payload = pack('>Q', (embeddedTime))
|
payload = pack('>Q', (embeddedTime))
|
||||||
|
payload += '\x00\x00\x00\x01' # object type: pubkey
|
||||||
payload += encodeVarint(addressVersionNumber) # Address version number
|
payload += encodeVarint(addressVersionNumber) # Address version number
|
||||||
payload += encodeVarint(streamNumber)
|
payload += encodeVarint(streamNumber)
|
||||||
|
|
||||||
|
@ -290,12 +308,10 @@ class singleWorker(threading.Thread):
|
||||||
myAddress, 'noncetrialsperbyte'))
|
myAddress, 'noncetrialsperbyte'))
|
||||||
dataToEncrypt += encodeVarint(shared.config.getint(
|
dataToEncrypt += encodeVarint(shared.config.getint(
|
||||||
myAddress, 'payloadlengthextrabytes'))
|
myAddress, 'payloadlengthextrabytes'))
|
||||||
|
|
||||||
|
|
||||||
signature = highlevelcrypto.sign(payload + dataToEncrypt, privSigningKeyHex)
|
|
||||||
dataToEncrypt += encodeVarint(len(signature))
|
|
||||||
dataToEncrypt += signature
|
|
||||||
|
|
||||||
# Let us encrypt the necessary data. We will use a hash of the data
|
# When we encrypt, we'll use a hash of the data
|
||||||
# contained in an address as a decryption key. This way in order to
|
# contained in an address as a decryption key. This way in order to
|
||||||
# read the public keys in a pubkey message, a node must know the address
|
# read the public keys in a pubkey message, a node must know the address
|
||||||
# first. We'll also tag, unencrypted, the pubkey with part of the hash
|
# first. We'll also tag, unencrypted, the pubkey with part of the hash
|
||||||
|
@ -304,14 +320,33 @@ class singleWorker(threading.Thread):
|
||||||
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
|
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
|
||||||
addressVersionNumber) + encodeVarint(streamNumber) + hash).digest()).digest()
|
addressVersionNumber) + encodeVarint(streamNumber) + hash).digest()).digest()
|
||||||
payload += doubleHashOfAddressData[32:] # the tag
|
payload += doubleHashOfAddressData[32:] # the tag
|
||||||
|
|
||||||
|
"""
|
||||||
|
According to the protocol specification, the expiresTime along with the pubkey information is
|
||||||
|
signed. But to be backwards compatible during the upgrade period, we shall sign not the
|
||||||
|
expiresTime but rather the current time. There must be precisely a 28 day difference
|
||||||
|
between the two. After the upgrade period we'll switch to signing the whole payload from
|
||||||
|
above appended with dataToEncrypt.
|
||||||
|
"""
|
||||||
|
if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
|
||||||
|
dataToSign = pack('>Q', (embeddedTime - TTL))
|
||||||
|
dataToSign += encodeVarint(addressVersionNumber) # Address version number
|
||||||
|
dataToSign += encodeVarint(streamNumber)
|
||||||
|
dataToSign += dataToEncrypt
|
||||||
|
else:
|
||||||
|
dataToSign = payload + dataToEncrypt
|
||||||
|
|
||||||
|
signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
|
||||||
|
dataToEncrypt += encodeVarint(len(signature))
|
||||||
|
dataToEncrypt += signature
|
||||||
|
|
||||||
privEncryptionKey = doubleHashOfAddressData[:32]
|
privEncryptionKey = doubleHashOfAddressData[:32]
|
||||||
pubEncryptionKey = highlevelcrypto.pointMult(privEncryptionKey)
|
pubEncryptionKey = highlevelcrypto.pointMult(privEncryptionKey)
|
||||||
payload += highlevelcrypto.encrypt(
|
payload += highlevelcrypto.encrypt(
|
||||||
dataToEncrypt, pubEncryptionKey.encode('hex'))
|
dataToEncrypt, pubEncryptionKey.encode('hex'))
|
||||||
|
|
||||||
# Do the POW for this pubkey message
|
# Do the POW for this pubkey message
|
||||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
|
||||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
|
||||||
print '(For pubkey message) Doing proof of work...'
|
print '(For pubkey message) Doing proof of work...'
|
||||||
initialHash = hashlib.sha512(payload).digest()
|
initialHash = hashlib.sha512(payload).digest()
|
||||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||||
|
@@ -319,7 +354,7 @@ class singleWorker(threading.Thread):

 payload = pack('>Q', nonce) + payload
 inventoryHash = calculateInventoryHash(payload)
-objectType = 'pubkey'
+objectType = 1
 shared.inventory[inventoryHash] = (
 objectType, streamNumber, payload, embeddedTime, doubleHashOfAddressData[32:])
 shared.inventorySets[streamNumber].add(inventoryHash)
@@ -372,12 +407,22 @@ class singleWorker(threading.Thread):
 pubEncryptionKey = highlevelcrypto.privToPub(
 privEncryptionKeyHex).decode('hex')

-payload = pack('>Q', (int(time.time()) + random.randrange(
--300, 300))) # the current time plus or minus five minutes
-if addressVersionNumber <= 3:
-payload += encodeVarint(2) # broadcast version
-else:
-payload += encodeVarint(3) # broadcast version
+TTL = 2.5 * 24 * 60 * 60 # 2.5 days
+embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL)
+payload = pack('>Q', embeddedTime)
+payload += '\x00\x00\x00\x03' # object type: broadcast
+if int(time.time()) < 1416175200: # Before Sun, 16 Nov 2014 22:00:00 GMT
+if addressVersionNumber <= 3:
+payload += encodeVarint(2) # broadcast version
+else:
+payload += encodeVarint(3) # broadcast version
+else: # After Sun, 16 Nov 2014 22:00:00 GMT
+if addressVersionNumber <= 3:
+payload += encodeVarint(4) # broadcast version
+else:
+payload += encodeVarint(5) # broadcast version
+
 payload += encodeVarint(streamNumber)
 if addressVersionNumber >= 4:
 doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
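A small sketch of the broadcast-version choice made in the hunk above (the function name is illustrative, not from the codebase): before the switchover timestamp 1416175200 (Sun, 16 Nov 2014 22:00:00 GMT) the old versions 2 and 3 are kept, afterwards versions 4 and 5 are used.

import time

def broadcast_version(address_version, now=None):
    # Mirrors the branching above: the cutover date ends the upgrade period.
    now = int(time.time()) if now is None else now
    if now < 1416175200:                       # before the protocol v3 switchover
        return 2 if address_version <= 3 else 3
    return 4 if address_version <= 3 else 5    # after the switchover

assert broadcast_version(4, now=1416175200) == 5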
@@ -387,10 +432,13 @@ class singleWorker(threading.Thread):
 else:
 tag = ''

-if addressVersionNumber <= 3:
-dataToEncrypt = encodeVarint(2) # broadcast version
-else:
-dataToEncrypt = encodeVarint(3) # broadcast version
+dataToEncrypt = ""
+# the broadcast version is not included here after the end of the protocol v3 upgrade period
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+if addressVersionNumber <= 3:
+dataToEncrypt += encodeVarint(2) # broadcast version
+else:
+dataToEncrypt += encodeVarint(3) # broadcast version
 dataToEncrypt += encodeVarint(addressVersionNumber)
 dataToEncrypt += encodeVarint(streamNumber)
 dataToEncrypt += '\x00\x00\x00\x01' # behavior bitfield
@@ -402,8 +450,13 @@ class singleWorker(threading.Thread):
 dataToEncrypt += '\x02' # message encoding type
 dataToEncrypt += encodeVarint(len('Subject:' + subject + '\n' + 'Body:' + body)) #Type 2 is simple UTF-8 message encoding per the documentation on the wiki.
 dataToEncrypt += 'Subject:' + subject + '\n' + 'Body:' + body
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+dataToSign = dataToEncrypt
+else:
+dataToSign = payload + dataToEncrypt
+
 signature = highlevelcrypto.sign(
-dataToEncrypt, privSigningKeyHex)
+dataToSign, privSigningKeyHex)
 dataToEncrypt += encodeVarint(len(signature))
 dataToEncrypt += signature

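The hunk above switches what gets signed for a broadcast: during the upgrade period only the plaintext section is signed, afterwards the unencrypted object header is prepended so the signature also covers the expiration time, object type, version and stream. A minimal sketch of that rule (the helper name is illustrative):

import time

def choose_data_to_sign(payload, data_to_encrypt, now=None):
    # payload is the unencrypted object header built above; data_to_encrypt is the plaintext.
    now = int(time.time()) if now is None else now
    if now < 1416175200:                 # Sun, 16 Nov 2014 22:00:00 GMT
        return data_to_encrypt           # old behaviour, kept for backwards compatibility
    return payload + data_to_encrypt     # protocol v3: sign header + plaintext

assert choose_data_to_sign(b'header', b'body', now=1500000000) == b'headerbody'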
@@ -420,8 +473,7 @@ class singleWorker(threading.Thread):
 payload += highlevelcrypto.encrypt(
 dataToEncrypt, pubEncryptionKey.encode('hex'))

-target = 2 ** 64 / ((len(
-payload) + shared.networkDefaultPayloadLengthExtraBytes + 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
 print '(For broadcast message) Doing proof of work...'
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
 ackdata, tr.translateText("MainWindow", "Doing work necessary to send broadcast..."))))
@@ -430,11 +482,18 @@ class singleWorker(threading.Thread):
 print '(For broadcast message) Found proof of work', trialValue, 'Nonce:', nonce

 payload = pack('>Q', nonce) + payload
+
+# Sanity check. The payload size should never be larger than 256 KiB. There should
+# be checks elsewhere in the code to not let the user try to send a message this large
+# until we implement message continuation.
+if len(payload) > 2 ** 18: # 256 KiB
+logger.critical('This broadcast object is too large to send. This should never happen. Object size: %s' % len(payload))
+continue
+
 inventoryHash = calculateInventoryHash(payload)
-objectType = 'broadcast'
+objectType = 3
 shared.inventory[inventoryHash] = (
-objectType, streamNumber, payload, int(time.time()),tag)
+objectType, streamNumber, payload, embeddedTime, tag)
 shared.inventorySets[streamNumber].add(inventoryHash)
 with shared.printLock:
 print 'sending inv (within sendBroadcast function) for object:', inventoryHash.encode('hex')
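The commit replaces the human-friendly object type strings in the inventory ('pubkey', 'msg', 'broadcast', ...) with the numeric codes that protocol v3 writes on the wire. A sketch of that mapping, assembled from the constants visible in this diff (getpubkey's code comes from the '\x00\x00\x00\x00' written into its payload later in the diff):

from struct import pack

OBJECT_TYPES = {
    'getpubkey': 0,   # '\x00\x00\x00\x00' in the payload
    'pubkey': 1,
    'msg': 2,         # '\x00\x00\x00\x02'
    'broadcast': 3,   # '\x00\x00\x00\x03'
}

def object_type_field(name):
    # The object type is a 4-byte big-endian unsigned integer in the object header.
    return pack('>I', OBJECT_TYPES[name])

assert object_type_field('msg') == b'\x00\x00\x00\x02'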
@@ -454,141 +513,139 @@ class singleWorker(threading.Thread):


 def sendMsg(self):
-# Check to see if there are any messages queued to be sent
+while True: # while we have a msg that needs some work
-queryreturn = sqlQuery(
-'''SELECT DISTINCT toaddress FROM sent WHERE (status='msgqueued' AND folder='sent')''')
+# Select just one msg that needs work.
-for row in queryreturn: # For each address to which we need to send a message, check to see if we have its pubkey already.
-toaddress, = row
-status, toAddressVersion, toStreamNumber, toRipe = decodeAddress(toaddress)
-# If we are sending a message to ourselves or a chan then we won't need an entry in the pubkeys table; we can calculate the needed pubkey using the private keys in our keys.dat file.
-if shared.config.has_section(toaddress):
-sqlExecute(
-'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND status='msgqueued' ''',
-toaddress)
-continue
 queryreturn = sqlQuery(
-'''SELECT hash FROM pubkeys WHERE hash=? AND addressversion=?''', toRipe, toAddressVersion)
+'''SELECT toaddress, toripe, fromaddress, subject, message, ackdata, status FROM sent WHERE (status='msgqueued' or status='doingmsgpow' or status='forcepow') and folder='sent' LIMIT 1''')
-if queryreturn != []: # If we have the needed pubkey in the pubkey table already, set the status to doingmsgpow (we'll do it further down)
+if len(queryreturn) == 0: # if there is no work to do then
+break # break out of this sendMsg loop and
+# wait for something to get put in the shared.workerQueue.
+row = queryreturn[0]
+toaddress, toripe, fromaddress, subject, message, ackdata, status = row
+toStatus, toAddressVersionNumber, toStreamNumber, toRipe = decodeAddress(
+toaddress)
+fromStatus, fromAddressVersionNumber, fromStreamNumber, fromRipe = decodeAddress(
+fromaddress)
+
+# We may or may not already have the pubkey for this toAddress. Let's check.
+if status == 'forcepow':
+# if the status of this msg is 'forcepow' then clearly we have the pubkey already
+# because the user could not have overridden the message about the POW being
+# too difficult without knowing the required difficulty.
+pass
+# If we are sending a message to ourselves or a chan then we won't need an entry in the pubkeys table; we can calculate the needed pubkey using the private keys in our keys.dat file.
+elif shared.config.has_section(toaddress):
 sqlExecute(
 '''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND status='msgqueued' ''',
 toaddress)
-else: # We don't have the needed pubkey in the pubkey table already.
+status='doingmsgpow'
-if toAddressVersion <= 3:
+else:
-toTag = ''
+# Let's see if we already have the pubkey in our pubkeys table
-else:
-toTag = hashlib.sha512(hashlib.sha512(encodeVarint(toAddressVersion)+encodeVarint(toStreamNumber)+toRipe).digest()).digest()[32:]
-if toRipe in shared.neededPubkeys or toTag in shared.neededPubkeys:
-# We already sent a request for the pubkey
-sqlExecute(
-'''UPDATE sent SET status='awaitingpubkey' WHERE toaddress=? AND status='msgqueued' ''', toaddress)
-shared.UISignalQueue.put(('updateSentItemStatusByHash', (
-toRipe, tr.translateText("MainWindow",'Encryption key was requested earlier.'))))
-else:
-# We have not yet sent a request for the pubkey
-needToRequestPubkey = True
-if toAddressVersion >= 4: # If we are trying to send to address version >= 4 then the needed pubkey might be encrypted in the inventory.
-# If we have it we'll need to decrypt it and put it in the pubkeys table.
-queryreturn = sqlQuery(
-'''SELECT payload FROM inventory WHERE objecttype='pubkey' and tag=? ''', toTag)
-if queryreturn != []: # if there was a pubkey in our inventory with the correct tag, we need to try to decrypt it.
-for row in queryreturn:
-data, = row
-if shared.decryptAndCheckPubkeyPayload(data, toaddress) == 'successful':
-needToRequestPubkey = False
-print 'debug. successfully decrypted and checked pubkey from sql inventory.' #testing
-sqlExecute(
-'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND status='msgqueued' ''',
-toaddress)
-break
-else: # There was something wrong with this pubkey even though it had the correct tag- almost certainly because of malicious behavior or a badly programmed client.
-continue
-if needToRequestPubkey: # Obviously we had no success looking in the sql inventory. Let's look through the memory inventory.
-with shared.inventoryLock:
-for hash, storedValue in shared.inventory.items():
-objectType, streamNumber, payload, receivedTime, tag = storedValue
-if objectType == 'pubkey' and tag == toTag:
-result = shared.decryptAndCheckPubkeyPayload(payload, toaddress) #if valid, this function also puts it in the pubkeys table.
-if result == 'successful':
-print 'debug. successfully decrypted and checked pubkey from memory inventory.'
-needToRequestPubkey = False
-sqlExecute(
-'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND status='msgqueued' ''',
-toaddress)
-break
-if needToRequestPubkey:
-sqlExecute(
-'''UPDATE sent SET status='doingpubkeypow' WHERE toaddress=? AND status='msgqueued' ''',
-toaddress)
-shared.UISignalQueue.put(('updateSentItemStatusByHash', (
-toRipe, tr.translateText("MainWindow",'Sending a request for the recipient\'s encryption key.'))))
-self.requestPubKey(toaddress)
-# Get all messages that are ready to be sent, and also all messages
-# which we have sent in the last 28 days which were previously marked
-# as 'toodifficult'. If the user as raised the maximum acceptable
-# difficulty then those messages may now be sendable.
-queryreturn = sqlQuery(
-'''SELECT toaddress, toripe, fromaddress, subject, message, ackdata, status FROM sent WHERE (status='doingmsgpow' or status='forcepow' or (status='toodifficult' and lastactiontime>?)) and folder='sent' ''',
-int(time.time()) - 2419200)
-for row in queryreturn: # For each message we need to send..
-toaddress, toripe, fromaddress, subject, message, ackdata, status = row
-toStatus, toAddressVersionNumber, toStreamNumber, toHash = decodeAddress(
-toaddress)
-fromStatus, fromAddressVersionNumber, fromStreamNumber, fromHash = decodeAddress(
-fromaddress)
-
-if not shared.config.has_section(toaddress):
-# There is a remote possibility that we may no longer have the
-# recipient's pubkey. Let us make sure we still have it or else the
-# sendMsg function will appear to freeze. This can happen if the
-# user sends a message but doesn't let the POW function finish,
-# then leaves their client off for a long time which could cause
-# the needed pubkey to expire and be deleted.
 queryreturn = sqlQuery(
-'''SELECT hash FROM pubkeys WHERE hash=? AND addressversion=?''',
+'''SELECT hash FROM pubkeys WHERE hash=? AND addressversion=?''', toRipe, toAddressVersionNumber)
-toripe,
+if queryreturn != []: # If we have the needed pubkey in the pubkey table already,
-toAddressVersionNumber)
+# set the status of this msg to doingmsgpow
-if queryreturn == [] and toripe not in shared.neededPubkeys:
-# We no longer have the needed pubkey and we haven't requested
-# it.
-with shared.printLock:
-sys.stderr.write(
-'For some reason, the status of a message in our outbox is \'doingmsgpow\' even though we lack the pubkey. Here is the RIPE hash of the needed pubkey: %s\n' % toripe.encode('hex'))
 sqlExecute(
-'''UPDATE sent SET status='doingpubkeypow' WHERE toaddress=? AND status='doingmsgpow' ''', toaddress)
+'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND status='msgqueued' ''',
-shared.UISignalQueue.put(('updateSentItemStatusByHash', (
+toaddress)
-toripe, tr.translateText("MainWindow",'Sending a request for the recipient\'s encryption key.'))))
+status = 'doingmsgpow'
-self.requestPubKey(toaddress)
+# mark the pubkey as 'usedpersonally' so that we don't delete it later
-continue
+sqlExecute(
+'''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=? and addressversion=?''',
+toRipe,
+toAddressVersionNumber)
+else: # We don't have the needed pubkey in the pubkeys table already.
+if toAddressVersionNumber <= 3:
+toTag = ''
+else:
+toTag = hashlib.sha512(hashlib.sha512(encodeVarint(toAddressVersionNumber)+encodeVarint(toStreamNumber)+toRipe).digest()).digest()[32:]
+if toRipe in shared.neededPubkeys or toTag in shared.neededPubkeys:
+# We already sent a request for the pubkey
+sqlExecute(
+'''UPDATE sent SET status='awaitingpubkey' WHERE toaddress=? AND status='msgqueued' ''', toaddress)
+shared.UISignalQueue.put(('updateSentItemStatusByHash', (
+toRipe, tr.translateText("MainWindow",'Encryption key was requested earlier.'))))
+continue #on with the next msg on which we can do some work
+else:
+# We have not yet sent a request for the pubkey
+needToRequestPubkey = True
+if toAddressVersionNumber >= 4: # If we are trying to send to address version >= 4 then
+# the needed pubkey might be encrypted in the inventory.
+# If we have it we'll need to decrypt it and put it in
+# the pubkeys table.
+
+# The decryptAndCheckPubkeyPayload function expects that the shared.neededPubkeys
+# dictionary already contains the toAddress and cryptor object associated with
+# the tag for this toAddress.
+doubleHashOfToAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
+toAddressVersionNumber) + encodeVarint(toStreamNumber) + toRipe).digest()).digest()
+privEncryptionKey = doubleHashOfToAddressData[:32] # The first half of the sha512 hash.
+tag = doubleHashOfToAddressData[32:] # The second half of the sha512 hash.
+shared.neededPubkeys[tag] = (toaddress, highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex')))
+
+queryreturn = sqlQuery(
+'''SELECT payload FROM inventory WHERE objecttype=1 and tag=? ''', toTag)
+if queryreturn != []: # if there are any pubkeys in our inventory with the correct tag..
+for row in queryreturn:
+payload, = row
+if shared.decryptAndCheckPubkeyPayload(payload, toaddress) == 'successful':
+needToRequestPubkey = False
+sqlExecute(
+'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND (status='msgqueued' or status='awaitingpubkey' or status='doingpubkeypow')''',
+toaddress)
+del shared.neededPubkeys[tag]
+continue # We'll start back at the beginning, pick up this msg, mark the pubkey as 'usedpersonally', and then send the msg.
+#else: # There was something wrong with this pubkey object even
+# though it had the correct tag- almost certainly because
+# of malicious behavior or a badly programmed client. If
+# there are any other pubkeys in our inventory with the correct
+# tag then we'll try to decrypt those.
+
+if needToRequestPubkey: # Obviously we had no success looking in the sql inventory. Let's look through the memory inventory.
+with shared.inventoryLock:
+for hash, storedValue in shared.inventory.items():
+objectType, streamNumber, payload, expiresTime, tag = storedValue
+if objectType == 1 and tag == toTag:
+if shared.decryptAndCheckPubkeyPayload(payload, toaddress) == 'successful': #if valid, this function also puts it in the pubkeys table.
+needToRequestPubkey = False
+sqlExecute(
+'''UPDATE sent SET status='doingmsgpow' WHERE toaddress=? AND (status='msgqueued' or status='awaitingpubkey' or status='doingpubkeypow')''',
+toaddress)
+del shared.neededPubkeys[tag]
+continue # We'll start back at the beginning, pick up this msg, mark the pubkey as 'usedpersonally', and then send the msg.
+if needToRequestPubkey:
+sqlExecute(
+'''UPDATE sent SET status='doingpubkeypow' WHERE toaddress=? AND status='msgqueued' ''',
+toaddress)
+shared.UISignalQueue.put(('updateSentItemStatusByHash', (
+toRipe, tr.translateText("MainWindow",'Sending a request for the recipient\'s encryption key.'))))
+self.requestPubKey(toaddress)
+continue #on with the next msg on which we can do some work
+
+# At this point we know that we have the necessary pubkey in the pubkeys table.
+TTL = 2.5 * 24 * 60 * 60 # 2.5 days
+embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL) # 2.5 days from now plus or minus five minutes
+
+if not shared.config.has_section(toaddress): # if we aren't sending this to ourselves or a chan
 shared.ackdataForWhichImWatching[ackdata] = 0
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
 ackdata, tr.translateText("MainWindow", "Looking up the receiver\'s public key"))))
 with shared.printLock:
 print 'Sending a message. First 150 characters of message:', repr(message[:150])


-# mark the pubkey as 'usedpersonally' so that we don't ever delete
-# it.
-sqlExecute(
-'''UPDATE pubkeys SET usedpersonally='yes' WHERE hash=? and addressversion=?''',
-toripe,
-toAddressVersionNumber)
 # Let us fetch the recipient's public key out of our database. If
 # the required proof of work difficulty is too hard then we'll
 # abort.
 queryreturn = sqlQuery(
 'SELECT transmitdata FROM pubkeys WHERE hash=? and addressversion=?',
-toripe,
+toRipe,
 toAddressVersionNumber)
-if queryreturn == []:
-with shared.printLock:
-sys.stderr.write(
-'(within sendMsg) The needed pubkey was not found. This should never happen. Aborting send.\n')
-
-return
 for row in queryreturn:
 pubkeyPayload, = row

 # The pubkey message is stored the way we originally received it
+# under protocol version 2
 # which means that we need to read beyond things like the nonce and
 # time to get to the actual public keys.
 if toAddressVersionNumber <= 3:
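The sendMsg hunk above derives both a decryption key and a lookup tag for a version 4 recipient from the same double SHA-512. A minimal sketch of that split, assuming the usual single-byte Bitmessage varint for values below 253 (real code uses the project's encodeVarint):

import hashlib

def derive_pubkey_tag(address_version, stream_number, ripe):
    # Double SHA-512 of (version varint + stream varint + ripe); first half is the
    # private decryption key for the encrypted pubkey, second half is the public tag.
    assert address_version < 253 and stream_number < 253
    data = bytes(bytearray([address_version, stream_number])) + ripe
    double_hash = hashlib.sha512(hashlib.sha512(data).digest()).digest()
    return double_hash[:32], double_hash[32:]   # (privEncryptionKey, tag)

priv_key, tag = derive_pubkey_tag(4, 1, b'\x00' * 20)
assert len(priv_key) == 32 and len(tag) == 32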
@@ -645,6 +702,7 @@ class singleWorker(threading.Thread):
 requiredAverageProofOfWorkNonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
 if requiredPayloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
 requiredPayloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
+logger.debug('Using averageProofOfWorkNonceTrialsPerByte: %s and payloadLengthExtraBytes: %s.' % (requiredAverageProofOfWorkNonceTrialsPerByte, requiredPayloadLengthExtraBytes))
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Doing work necessary to send message.\nReceiver\'s required difficulty: %1 and %2").arg(str(float(
 requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)))))
 if status != 'forcepow':
@@ -654,7 +712,7 @@ class singleWorker(threading.Thread):
 sqlExecute(
 '''UPDATE sent SET status='toodifficult' WHERE ackdata=? ''',
 ackdata)
-shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do.").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
+shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Problem: The work demanded by the recipient (%1 and %2) is more difficult than you are willing to do. %3").arg(str(float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte)).arg(str(float(
 requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes)).arg(l10n.formatTimestamp()))))
 continue
 else: # if we are sending a message to ourselves or a chan..
@@ -680,10 +738,10 @@ class singleWorker(threading.Thread):
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
 ackdata, tr.translateText("MainWindow", "Doing work necessary to send message."))))

-embeddedTime = pack('>Q', (int(time.time()) + random.randrange(
--300, 300))) # the current time plus or minus five minutes.
 if fromAddressVersionNumber == 2:
-payload = '\x01' # Message version.
+payload = ""
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+payload += '\x01' # Message version.
 payload += encodeVarint(fromAddressVersionNumber)
 payload += encodeVarint(fromStreamNumber)
 payload += '\x00\x00\x00\x01' # Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
@@ -714,7 +772,7 @@ class singleWorker(threading.Thread):
 1:] # The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
 payload += pubEncryptionKey[1:]

-payload += toHash # This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
+payload += toRipe # This hash will be checked by the receiver of the message to verify that toRipe belongs to them. This prevents a Surreptitious Forwarding Attack.
 payload += '\x02' # Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
 messageToTransmit = 'Subject:' + \
 subject + '\n' + 'Body:' + message
@@ -724,12 +782,18 @@ class singleWorker(threading.Thread):
 ackdata, toStreamNumber) # The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
 payload += encodeVarint(len(fullAckPayload))
 payload += fullAckPayload
-signature = highlevelcrypto.sign(payload, privSigningKeyHex)
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+dataToSign = payload
+else:
+dataToSign = pack('>Q', embeddedTime) + '\x00\x00\x00\x02' + encodeVarint(1) + encodeVarint(toStreamNumber) + payload
+signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
 payload += encodeVarint(len(signature))
 payload += signature

 if fromAddressVersionNumber >= 3:
-payload = '\x01' # Message version.
+payload = ""
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+payload += '\x01' # Message version.
 payload += encodeVarint(fromAddressVersionNumber)
 payload += encodeVarint(fromStreamNumber)
 payload += '\x00\x00\x00\x01' # Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
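After the upgrade period, the data signed for a msg is no longer just the unencrypted message section: the object header is prepended, exactly as in the dataToSign expression above. A minimal sketch of that layout, again using single-byte varints for values below 253 (the helper name is illustrative):

import time
from struct import pack

def v3_msg_data_to_sign(expires_time, stream_number, message_payload):
    # expiresTime (8 bytes), object type 2 (msg), msg version 1, stream number,
    # then the unencrypted message data itself.
    header = pack('>Q', expires_time)
    header += b'\x00\x00\x00\x02'                # object type: msg
    header += bytes(bytearray([1]))              # msg version 1 (varint, value < 253)
    header += bytes(bytearray([stream_number]))  # stream number (varint, value < 253)
    return header + message_payload

example = v3_msg_data_to_sign(int(time.time()) + 216000, 1, b'...')
assert example[8:12] == b'\x00\x00\x00\x02'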
@@ -774,7 +838,7 @@ class singleWorker(threading.Thread):
 payload += encodeVarint(shared.config.getint(
 fromaddress, 'payloadlengthextrabytes'))

-payload += toHash # This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
+payload += toRipe # This hash will be checked by the receiver of the message to verify that toRipe belongs to them. This prevents a Surreptitious Forwarding Attack.
 payload += '\x02' # Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
 messageToTransmit = 'Subject:' + \
 subject + '\n' + 'Body:' + message
@@ -793,7 +857,11 @@ class singleWorker(threading.Thread):
 ackdata, toStreamNumber) # The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
 payload += encodeVarint(len(fullAckPayload))
 payload += fullAckPayload
-signature = highlevelcrypto.sign(payload, privSigningKeyHex)
+if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+dataToSign = payload
+else:
+dataToSign = pack('>Q', embeddedTime) + '\x00\x00\x00\x02' + encodeVarint(1) + encodeVarint(toStreamNumber) + payload
+signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
 payload += encodeVarint(len(signature))
 payload += signature

@@ -805,8 +873,13 @@ class singleWorker(threading.Thread):
 sqlExecute('''UPDATE sent SET status='badkey' WHERE ackdata=?''', ackdata)
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata',(ackdata,tr.translateText("MainWindow",'Problem: The recipient\'s encryption key is no good. Could not encrypt message. %1').arg(l10n.formatTimestamp()))))
 continue
-encryptedPayload = embeddedTime + encodeVarint(toStreamNumber) + encrypted
-target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
+encryptedPayload = pack('>Q', embeddedTime)
+encryptedPayload += '\x00\x00\x00\x02' # object type: msg
+if int(time.time()) >= 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+encryptedPayload += encodeVarint(1) # msg version
+encryptedPayload += encodeVarint(toStreamNumber) + encrypted
+target = 2 ** 64 / (requiredAverageProofOfWorkNonceTrialsPerByte*(len(encryptedPayload) + 8 + requiredPayloadLengthExtraBytes + ((TTL*(len(encryptedPayload)+8+requiredPayloadLengthExtraBytes))/(2 ** 16))))
 with shared.printLock:
 print '(For msg message) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes

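For a msg the target above uses the recipient's advertised difficulty rather than the network defaults, after clamping both values to the network minimums in the earlier hunk. A sketch of that combination; the two 1000 values are assumptions about the v3 network minimums, and the function name is illustrative:

NETWORK_MIN_NONCE_TRIALS_PER_BYTE = 1000       # assumption: v3 network default
NETWORK_MIN_PAYLOAD_LENGTH_EXTRA_BYTES = 1000  # assumption: v3 network default

def msg_pow_target(payload_length, ttl_seconds,
                   receiver_nonce_trials_per_byte,
                   receiver_payload_length_extra_bytes):
    # Never work below the network minimums, mirroring the clamping above.
    trials = max(receiver_nonce_trials_per_byte, NETWORK_MIN_NONCE_TRIALS_PER_BYTE)
    extra = max(receiver_payload_length_extra_bytes,
                NETWORK_MIN_PAYLOAD_LENGTH_EXTRA_BYTES)
    padded = payload_length + 8 + extra
    return 2 ** 64 // (trials * (padded + (ttl_seconds * padded) // (2 ** 16)))

# A receiver demanding double the minimum trials roughly halves the target.
print(msg_pow_target(500, 216000, 2000, 1000))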
@@ -821,11 +894,18 @@ class singleWorker(threading.Thread):
 pass

 encryptedPayload = pack('>Q', nonce) + encryptedPayload
+
+# Sanity check. The encryptedPayload size should never be larger than 256 KiB. There should
+# be checks elsewhere in the code to not let the user try to send a message this large
+# until we implement message continuation.
+if len(encryptedPayload) > 2 ** 18: # 256 KiB
+logger.critical('This msg object is too large to send. This should never happen. Object size: %s' % len(encryptedPayload))
+continue
+
 inventoryHash = calculateInventoryHash(encryptedPayload)
-objectType = 'msg'
+objectType = 2
 shared.inventory[inventoryHash] = (
-objectType, toStreamNumber, encryptedPayload, int(time.time()),'')
+objectType, toStreamNumber, encryptedPayload, embeddedTime, '')
 shared.inventorySets[toStreamNumber].add(inventoryHash)
 if shared.config.has_section(toaddress):
 shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Message sent. Sent on %1").arg(l10n.formatTimestamp()))))
@@ -837,7 +917,7 @@ class singleWorker(threading.Thread):
 toStreamNumber, 'advertiseobject', inventoryHash))

 # Update the status of the message in the 'sent' table to have a
-# 'msgsent' status or 'msgsentnoackexpected' status.
+# 'msgsent' or 'msgsentnoackexpected' status.
 if shared.config.has_section(toaddress):
 newStatus = 'msgsentnoackexpected'
 else:
@@ -879,11 +959,18 @@ class singleWorker(threading.Thread):
 if addressVersionNumber <= 3:
 shared.neededPubkeys[ripe] = 0
 elif addressVersionNumber >= 4:
+# If the user just clicked 'send' then the tag (and other information) will already
+# be in the neededPubkeys dictionary. But if we are recovering from a restart
+# of the client then we have to put it in now.
 privEncryptionKey = hashlib.sha512(hashlib.sha512(encodeVarint(addressVersionNumber)+encodeVarint(streamNumber)+ripe).digest()).digest()[:32] # Note that this is the first half of the sha512 hash.
 tag = hashlib.sha512(hashlib.sha512(encodeVarint(addressVersionNumber)+encodeVarint(streamNumber)+ripe).digest()).digest()[32:] # Note that this is the second half of the sha512 hash.
-shared.neededPubkeys[tag] = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex')) # We'll need this for when we receive a pubkey reply: it will be encrypted and we'll need to decrypt it.
-payload = pack('>Q', (int(time.time()) + random.randrange(
--300, 300))) # the current time plus or minus five minutes.
+if tag not in shared.neededPubkeys:
+shared.neededPubkeys[tag] = (toAddress, highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))) # We'll need this for when we receive a pubkey reply: it will be encrypted and we'll need to decrypt it.
+
+TTL = 2.5 * 24 * 60 * 60 # 2.5 days
+embeddedTime = int(time.time() + random.randrange(-300, 300) + TTL) # 2.5 days from now plus or minus five minutes
+payload = pack('>Q', embeddedTime)
+payload += '\x00\x00\x00\x00' # object type: getpubkey
 payload += encodeVarint(addressVersionNumber)
 payload += encodeVarint(streamNumber)
 if addressVersionNumber <= 3:
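The getpubkey request above now carries a fuzzed expiration time 2.5 days out followed by the 4-byte object type 0, then the address version and stream varints. A small sketch of that header, with the varints again written as single bytes for values below 253 (the function name is illustrative):

import random
import time
from struct import pack

def getpubkey_header(address_version, stream_number):
    ttl = int(2.5 * 24 * 60 * 60)                      # 2.5 days
    expires_time = int(time.time() + random.randrange(-300, 300) + ttl)
    payload = pack('>Q', expires_time)
    payload += b'\x00\x00\x00\x00'                     # object type: getpubkey
    payload += bytes(bytearray([address_version, stream_number]))
    return payload

header = getpubkey_header(4, 1)
assert len(header) == 14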
@@ -900,19 +987,19 @@ class singleWorker(threading.Thread):
 shared.UISignalQueue.put(('updateStatusBar', statusbar))
 shared.UISignalQueue.put(('updateSentItemStatusByHash', (
 ripe, tr.translateText("MainWindow",'Doing work necessary to request encryption key.'))))
-target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
-8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+TTL = 2.5 * 24 * 60 * 60 # 2.5 days
+target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
 initialHash = hashlib.sha512(payload).digest()
 trialValue, nonce = proofofwork.run(target, initialHash)
 with shared.printLock:
 print 'Found proof of work', trialValue, 'Nonce:', nonce


 payload = pack('>Q', nonce) + payload
 inventoryHash = calculateInventoryHash(payload)
-objectType = 'getpubkey'
+objectType = 1
 shared.inventory[inventoryHash] = (
-objectType, streamNumber, payload, int(time.time()),'')
+objectType, streamNumber, payload, embeddedTime, '')
 shared.inventorySets[streamNumber].add(inventoryHash)
 print 'sending inv (for the getpubkey message)'
 shared.broadcastToSendDataQueues((
@@ -927,11 +1014,14 @@ class singleWorker(threading.Thread):
 shared.UISignalQueue.put(('updateSentItemStatusByHash', (ripe, tr.translateText("MainWindow",'Sending public key request. Waiting for reply. Requested at %1').arg(l10n.formatTimestamp()))))

 def generateFullAckMessage(self, ackdata, toStreamNumber):
-embeddedTime = pack('>Q', (int(time.time()) + random.randrange(
--300, 300))) # the current time plus or minus five minutes.
-payload = embeddedTime + encodeVarint(toStreamNumber) + ackdata
-target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
-8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
+embeddedTime = int(time.time() + random.randrange(-300, 300)) # the current time plus or minus five minutes.
+payload = pack('>Q', (embeddedTime))
+payload += '\x00\x00\x00\x02' # object type: msg
+if int(time.time()) >= 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+payload += encodeVarint(1) # msg version
+payload += encodeVarint(toStreamNumber) + ackdata
+TTL = 2.5 * 24 * 60 * 60 # 2.5 days
+target = 2 ** 64 / (shared.networkDefaultProofOfWorkNonceTrialsPerByte*(len(payload) + 8 + shared.networkDefaultPayloadLengthExtraBytes + ((TTL*(len(payload)+8+shared.networkDefaultPayloadLengthExtraBytes))/(2 ** 16))))
 with shared.printLock:
 print '(For ack message) Doing proof of work...'

@@ -946,4 +1036,4 @@ class singleWorker(threading.Thread):
 pass

 payload = pack('>Q', nonce) + payload
-return shared.CreatePacket('msg', payload)
+return shared.CreatePacket('object', payload)

@@ -39,28 +39,33 @@ class sqlThread(threading.Thread):
 '''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
 self.cur.execute(
 '''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
-# Explanation of what is in the pubkeys table:
-# The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
-# transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
-# time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
-# usedpersonally is set to "yes" if we have used the key
-# personally. This keeps us from deleting it because we may want to
-# reply to a message in the future. This field is not a bool
-# because we may need more flexability in the future and it doesn't
-# take up much more space anyway.
+"""
+Explanation of what is in the pubkeys table:
+The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
+
+transmitdata /was/ literally the data that was included in the Bitmessage pubkey message when it arrived,
+except for the 24 byte protocol header- ie, it started with the POW nonce. Since protocol v3, to maintain
+backwards compability, the data format of the data on disk is staying the same even though the wire format has changed.
+
+time is the time that the pubkey was broadcast on the network same as with every other type of Bitmessage object.
+
+usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to
+reply to a message in the future. This field is not a bool because we may need more flexability in the future and it doesn't
+take up much more space anyway.
+"""
 self.cur.execute(
 '''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''' )
 self.cur.execute(
-'''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
+'''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
 self.cur.execute(
 '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
 self.cur.execute(
 '''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' )
-self.cur.execute( '''INSERT INTO settings VALUES('version','6')''')
+self.cur.execute( '''INSERT INTO settings VALUES('version','7')''')
 self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
 int(time.time()),))
 self.cur.execute(
-'''CREATE TABLE objectprocessorqueue (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
+'''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
 self.conn.commit()
 logger.info('Created messages database file')
 except Exception as err:
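For context, a minimal sketch of the reworked inventory schema above, run against an in-memory SQLite database: objecttype is now an integer and the stored timestamp is the object's expiration time rather than the time it was received. Only the schema text comes from the diff; the sample row is illustrative.

import sqlite3
import time

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int,
               payload blob, expirestime integer, tag blob,
               UNIQUE(hash) ON CONFLICT REPLACE)''')
# A pubkey object (objecttype 1) that expires 28 days from now.
cur.execute('INSERT INTO inventory VALUES (?,?,?,?,?,?)',
            (sqlite3.Binary(b'\x11' * 32), 1, 1, sqlite3.Binary(b'payload'),
             int(time.time()) + 28 * 24 * 60 * 60, sqlite3.Binary(b'\x22' * 32)))
conn.commit()
print(cur.execute('SELECT objecttype, expirestime FROM inventory').fetchall())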
@@ -119,8 +124,6 @@ class sqlThread(threading.Thread):
 self.conn.commit()

 shared.config.set('bitmessagesettings', 'settingsversion', '4')
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
-shared.config.write(configfile)

 if shared.config.getint('bitmessagesettings', 'settingsversion') == 4:
 shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
@@ -135,9 +138,6 @@ class sqlThread(threading.Thread):
 shared.config.set(
 'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', '0')
 shared.config.set('bitmessagesettings', 'settingsversion', '6')
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
-shared.config.write(configfile)

 # From now on, let us keep a 'version' embedded in the messages.dat
 # file so that when we make changes to the database, the database
 # version we are on can stay embedded in the messages.dat file. Let us
@@ -247,12 +247,12 @@ class sqlThread(threading.Thread):
 shared.config.set('bitmessagesettings', 'sendoutgoingconnections', 'True')

 # Raise the default required difficulty from 1 to 2
+# With the change to protocol v3, this is obsolete.
 if shared.config.getint('bitmessagesettings', 'settingsversion') == 6:
-if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == shared.networkDefaultProofOfWorkNonceTrialsPerByte:
+"""if int(shared.config.get('bitmessagesettings','defaultnoncetrialsperbyte')) == shared.networkDefaultProofOfWorkNonceTrialsPerByte:
 shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
+"""
 shared.config.set('bitmessagesettings', 'settingsversion', '7')
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
-shared.config.write(configfile)

 # Add a new column to the pubkeys table to store the address version.
 # We're going to trash all of our pubkeys and let them be redownloaded.
@@ -274,11 +274,8 @@ class sqlThread(threading.Thread):
 shared.config.set('bitmessagesettings', 'useidenticons', 'True')
 if not shared.config.has_option('bitmessagesettings', 'identiconsuffix'): # acts as a salt
 shared.config.set('bitmessagesettings', 'identiconsuffix', ''.join(random.choice("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") for x in range(12))) # a twelve character pseudo-password to salt the identicons
-# Since we've added a new config entry, let's write keys.dat to disk.
-with open(shared.appdata + 'keys.dat', 'wb') as configfile:
-shared.config.write(configfile)

-#Adjusting time period to stop sending messages
+#Add settings to support no longer resending messages after a certain period of time even if we never get an ack
 if shared.config.getint('bitmessagesettings', 'settingsversion') == 7:
 shared.config.set(
 'bitmessagesettings', 'stopresendingafterxdays', '')
@ -302,9 +299,57 @@ class sqlThread(threading.Thread):
|
||||||
item = '''update settings set value=? WHERE key='version';'''
|
             item = '''update settings set value=? WHERE key='version';'''
             parameters = (6,)
             self.cur.execute(item, parameters)

+        # changes related to protocol v3
+        # In table inventory and objectprocessorqueue, objecttype is now an integer (it was a human-friendly string previously)
+        item = '''SELECT value FROM settings WHERE key='version';'''
+        parameters = ''
+        self.cur.execute(item, parameters)
+        currentVersion = int(self.cur.fetchall()[0][0])
+        if currentVersion == 6:
+            logger.debug('In messages.dat database, dropping and recreating the inventory table.')
+            self.cur.execute( '''DROP TABLE inventory''')
+            self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
+            self.cur.execute( '''DROP TABLE objectprocessorqueue''')
+            self.cur.execute( '''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
+            item = '''update settings set value=? WHERE key='version';'''
+            parameters = (7,)
+            self.cur.execute(item, parameters)
+            logger.debug('Finished dropping and recreating the inventory table.')
+
+        # With the change to protocol version 3, reset the user-settable difficulties to 1
+        if shared.config.getint('bitmessagesettings', 'settingsversion') == 8:
+            shared.config.set('bitmessagesettings','defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte))
+            shared.config.set('bitmessagesettings','defaultpayloadlengthextrabytes', str(shared.networkDefaultPayloadLengthExtraBytes))
+            previousTotalDifficulty = int(shared.config.getint('bitmessagesettings', 'maxacceptablenoncetrialsperbyte')) / 320
+            previousSmallMessageDifficulty = int(shared.config.getint('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')) / 14000
+            shared.config.set('bitmessagesettings','maxacceptablenoncetrialsperbyte', str(previousTotalDifficulty * 1000))
+            shared.config.set('bitmessagesettings','maxacceptablepayloadlengthextrabytes', str(previousSmallMessageDifficulty * 1000))
+            shared.config.set('bitmessagesettings', 'settingsversion', '9')
+
+        # Adjust the required POW values for each of this user's addresses to conform to protocol v3 norms.
+        if shared.config.getint('bitmessagesettings', 'settingsversion') == 9:
+            for addressInKeysFile in shared.config.sections():
+                try:
+                    previousTotalDifficulty = float(shared.config.getint(addressInKeysFile, 'noncetrialsperbyte')) / 320
+                    previousSmallMessageDifficulty = float(shared.config.getint(addressInKeysFile, 'payloadlengthextrabytes')) / 14000
+                    if previousTotalDifficulty <= 2:
+                        previousTotalDifficulty = 1
+                    if previousSmallMessageDifficulty < 1:
+                        previousSmallMessageDifficulty = 1
+                    shared.config.set(addressInKeysFile,'noncetrialsperbyte', str(int(previousTotalDifficulty * 1000)))
+                    shared.config.set(addressInKeysFile,'payloadlengthextrabytes', str(int(previousSmallMessageDifficulty * 1000)))
+                except:
+                    continue
+            shared.config.set('bitmessagesettings', 'maxdownloadrate', '0')
+            shared.config.set('bitmessagesettings', 'maxuploadrate', '0')
+            shared.config.set('bitmessagesettings', 'settingsversion', '10')
+            with open(shared.appdata + 'keys.dat', 'wb') as configfile:
+                shared.config.write(configfile)
+
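Note (illustrative, not part of the commit): protocol v3 rebases the proof-of-work defaults from 320 nonce trials per byte and 14000 extra bytes to 1000 and 1000, so the upgrade above first expresses each stored per-address value as a multiple of the old default and then reapplies that multiple to the new base. A minimal sketch of the arithmetic with an example stored value:

    # Illustrative numbers only; this mirrors the per-address conversion above.
    oldNonceTrialsPerByte = 640                        # example value saved under the old default of 320
    difficulty = float(oldNonceTrialsPerByte) / 320    # 2.0x the old default
    if difficulty <= 2:                                # the upgrade code treats anything <= 2x as the default
        difficulty = 1
    newNonceTrialsPerByte = int(difficulty * 1000)     # 1000 under the new default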

         # Are you hoping to add a new option to the keys.dat file of existing
-        # Bitmessage users? Add it right above this line!
+        # Bitmessage users or modify the SQLite database? Add it right above this line!

         try:
             testpayload = '\x00\x00'
@@ -20,6 +20,8 @@ import logging
 import logging.config
 import shared
 import sys
+import helper_startup
+helper_startup.loadConfig()

 # TODO(xj9): Get from a config file.
 log_level = 'DEBUG'
@@ -12,15 +12,16 @@ def createDefaultKnownNodes(appdata):
     stream1 = {}

     #stream1[shared.Peer('2604:2000:1380:9f:82e:148b:2746:d0c7', 8080)] = int(time.time())
-    stream1[shared.Peer('68.33.0.104', 8444)] = int(time.time())
-    stream1[shared.Peer('97.77.34.35', 8444)] = int(time.time())
-    stream1[shared.Peer('71.232.195.131', 8444)] = int(time.time())
-    stream1[shared.Peer('192.241.231.39', 8444)] = int(time.time())
-    stream1[shared.Peer('75.66.0.116', 8444)] = int(time.time())
-    stream1[shared.Peer('182.169.23.102', 8444)] = int(time.time())
-    stream1[shared.Peer('75.95.134.9', 8444)] = int(time.time())
-    stream1[shared.Peer('46.236.100.108', 48444)] = int(time.time())
-    stream1[shared.Peer('66.108.53.42', 8080)] = int(time.time())
+    stream1[shared.Peer('23.239.9.147', 8444)] = int(time.time())
+    stream1[shared.Peer('98.218.125.214', 8444)] = int(time.time())
+    stream1[shared.Peer('192.121.170.162', 8444)] = int(time.time())
+    stream1[shared.Peer('108.61.72.12', 28444)] = int(time.time())
+    stream1[shared.Peer('158.222.211.81', 8080)] = int(time.time())
+    stream1[shared.Peer('79.163.240.110', 8446)] = int(time.time())
+    stream1[shared.Peer('178.62.154.250', 8444)] = int(time.time())
+    stream1[shared.Peer('178.62.155.6', 8444)] = int(time.time())
+    stream1[shared.Peer('178.62.155.8', 8444)] = int(time.time())
+    stream1[shared.Peer('68.42.42.120', 8444)] = int(time.time())

     ############# Stream 2 #################
     stream2 = {}
@@ -25,7 +25,7 @@ def knownNodes():
                 shared.knownNodes[stream][peer] = time
     except:
         shared.knownNodes = defaultKnownNodes.createDefaultKnownNodes(shared.appdata)
-    if shared.config.getint('bitmessagesettings', 'settingsversion') > 8:
+    if shared.config.getint('bitmessagesettings', 'settingsversion') > 10:
         print 'Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage.'
         raise SystemExit
@@ -57,7 +57,7 @@ def loadConfig():
         # This appears to be the first time running the program; there is
         # no config file (or it cannot be accessed). Create config file.
         shared.config.add_section('bitmessagesettings')
-        shared.config.set('bitmessagesettings', 'settingsversion', '8')
+        shared.config.set('bitmessagesettings', 'settingsversion', '10')
         shared.config.set('bitmessagesettings', 'port', '8444')
         shared.config.set(
             'bitmessagesettings', 'timeformat', '%%a, %%d %%b %%Y %%I:%%M %%p')
@@ -89,7 +89,7 @@ def loadConfig():
         shared.config.set(
             'bitmessagesettings', 'messagesencrypted', 'false')
         shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(
-            shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
+            shared.networkDefaultProofOfWorkNonceTrialsPerByte))
         shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
             shared.networkDefaultPayloadLengthExtraBytes))
         shared.config.set('bitmessagesettings', 'minimizeonclose', 'false')
@@ -102,6 +102,8 @@ def loadConfig():
         shared.config.set('bitmessagesettings', 'useidenticons', 'True')
         shared.config.set('bitmessagesettings', 'identiconsuffix', ''.join(random.choice("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") for x in range(12))) # a twelve character pseudo-password to salt the identicons
         shared.config.set('bitmessagesettings', 'replybelow', 'False')
+        shared.config.set('bitmessagesettings', 'maxdownloadrate', '0')
+        shared.config.set('bitmessagesettings', 'maxuploadrate', '0')

         #start:UI setting to stop trying to send messages after X days/months
         shared.config.set(
@@ -33,7 +33,10 @@ def sign(msg,hexPrivkey):
     return makeCryptor(hexPrivkey).sign(msg)

 # Verifies with hex public key
 def verify(msg,sig,hexPubkey):
-    return makePubCryptor(hexPubkey).verify(sig,msg)
+    try:
+        return makePubCryptor(hexPubkey).verify(sig,msg)
+    except:
+        return False

 # Does an EC point multiplication; turns a private key into a public key.
 def pointMult(secret):
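Note (illustrative, not part of the commit): wrapping verify() in try/except means a malformed signature or key now yields False instead of propagating an exception out of the OpenSSL wrapper, so callers can treat both cases the same way:

    # Sketch: callers no longer need their own exception handling around verify().
    if not verify(msg, possiblyMalformedSignature, hexPubkey):
        pass  # treat it as a failed signature check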
@@ -54,13 +54,13 @@ def readPubkeys():

 def readInventory():
     print 'Printing everything in inventory table:'
-    item = '''select hash, objecttype, streamnumber, payload, receivedtime from inventory'''
+    item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory'''
     parameters = ''
     cur.execute(item, parameters)
     output = cur.fetchall()
     for row in output:
-        hash, objecttype, streamnumber, payload, receivedtime = row
-        print 'Hash:', hash.encode('hex'), objecttype, streamnumber, '\t', payload.encode('hex'), '\t', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(receivedtime)),'utf-8')
+        hash, objecttype, streamnumber, payload, expirestime = row
+        print 'Hash:', hash.encode('hex'), objecttype, streamnumber, '\t', payload.encode('hex'), '\t', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(expirestime)),'utf-8')


 def takeInboxMessagesOutOfTrash():
@@ -5,6 +5,7 @@ import hashlib
 from struct import unpack, pack
 import sys
 from shared import config, frozen
+import shared
 #import os

 def _set_idle():
@@ -39,7 +40,6 @@ def _doSafePoW(target, initialHash):
     return [trialValue, nonce]

 def _doFastPoW(target, initialHash):
-    import shared
     import time
     from multiprocessing import Pool, cpu_count
     try:
@@ -436,11 +436,16 @@ class ECC:
         pubkey = ephem.get_pubkey()
         iv = OpenSSL.rand(OpenSSL.get_cipher(ciphername).get_blocksize())
         ctx = Cipher(key_e, iv, 1, ciphername)
-        ciphertext = ctx.ciphering(data)
-        #ciphertext = iv + pubkey + ctx.ciphering(data) # We will switch to this line after an upgrade period
+        import time
+        if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+            ciphertext = ctx.ciphering(data)
+        else:
+            ciphertext = iv + pubkey + ctx.ciphering(data) # Everyone should be using this line after the Bitmessage protocol v3 upgrade period
         mac = hmac_sha256(key_m, ciphertext)
-        return iv + pubkey + ciphertext + mac
-        #return ciphertext + mac # We will switch to this line after an upgrade period.
+        if int(time.time()) < 1416175200: # Sun, 16 Nov 2014 22:00:00 GMT
+            return iv + pubkey + ciphertext + mac
+        else:
+            return ciphertext + mac # Everyone should be using this line after the Bitmessage protocol v3 upgrade period

     def decrypt(self, data, ciphername='aes-256-cbc'):
         """
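Note (illustrative, not part of the commit): in both branches above the bytes placed on the wire are the same -- IV, ephemeral public key, ciphertext, MAC -- but after the Sun, 16 Nov 2014 cutoff the HMAC is computed over the IV and ephemeral public key as well, not over the raw ciphertext alone:

    # Placeholder strings; only the MAC input differs between the two branches.
    iv, pubkey, rawCiphertext = 'IV......', 'EPHEMPUB', 'CIPHER..'
    macInputBeforeCutoff = rawCiphertext                # old: MAC covers the ciphertext only
    macInputAfterCutoff = iv + pubkey + rawCiphertext   # new: MAC also covers IV and ephemeral key
    # Either way the message sent is iv + pubkey + rawCiphertext + mac.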
src/shared.py
@@ -1,9 +1,7 @@
-softwareVersion = '0.4.3'
+softwareVersion = '0.4.4'
 verbose = 1
-maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 # Equals two days and 12 hours.
+maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 # This is obsolete with the change to protocol v3 but the singleCleaner thread still hasn't been updated so we need this a little longer.
-lengthOfTimeToLeaveObjectsInInventory = 237600 # Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
 lengthOfTimeToHoldOnToAllPubkeys = 2419200 # Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
-maximumAgeOfObjectsThatIAdvertiseToOthers = 216000 # Equals two days and 12 hours
 maximumAgeOfNodesThatIAdvertiseToOthers = 10800 # Equals three hours
 useVeryEasyProofOfWorkForTesting = False # If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
@@ -22,12 +20,13 @@ import threading
 import time
 from os import path, environ
 from struct import Struct
+import traceback

 # Project imports.
 from addresses import *
 import highlevelcrypto
 import shared
-import helper_startup
+#import helper_startup
 from helper_sql import *

@@ -71,8 +70,14 @@ numberOfMessagesProcessed = 0
 numberOfBroadcastsProcessed = 0
 numberOfPubkeysProcessed = 0
 numberOfInventoryLookupsPerformed = 0
-numberOfBytesReceived = 0
-numberOfBytesSent = 0
+numberOfBytesReceived = 0 # Used for the 'network status' page
+numberOfBytesSent = 0 # Used for the 'network status' page
+numberOfBytesReceivedLastSecond = 0 # used for the bandwidth rate limit
+numberOfBytesSentLastSecond = 0 # used for the bandwidth rate limit
+lastTimeWeResetBytesReceived = 0 # used for the bandwidth rate limit
+lastTimeWeResetBytesSent = 0 # used for the bandwidth rate limit
+sendDataLock = threading.Lock() # used for the bandwidth rate limit
+receiveDataLock = threading.Lock() # used for the bandwidth rate limit
 daemon = False
 inventorySets = {} # key = streamNumer, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple hours.
 needToWriteKnownNodesToDisk = False # If True, the singleCleaner will write it to disk eventually.
@@ -82,8 +87,8 @@ objectProcessorQueue = Queue.Queue(
 streamsInWhichIAmParticipating = {}

 #If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
-networkDefaultProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
+networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
-networkDefaultPayloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
+networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.

 # Remember here the RPC port read from namecoin.conf so we can restore to
 # it as default whenever the user changes the "method" selection for
@@ -114,10 +119,7 @@ Header = Struct('!L12sL4s')
 #Create a packet
 def CreatePacket(command, payload=''):
     payload_length = len(payload)
-    if payload_length == 0:
-        checksum = '\xCF\x83\xE1\x35'
-    else:
-        checksum = hashlib.sha512(payload).digest()[0:4]
+    checksum = hashlib.sha512(payload).digest()[0:4]

     b = bytearray(Header.size + payload_length)
     Header.pack_into(b, 0, 0xE9BEB4D9, command, payload_length, checksum)
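Note (illustrative, not part of the commit): CreatePacket always prefixes the payload with the 24-byte header packed by Header = Struct('!L12sL4s') -- the magic number 0xE9BEB4D9, a 12-byte command, the payload length, and the first four bytes of the payload's SHA-512. A sketch of the matching parse step (parsePacket is a hypothetical helper, not part of the codebase):

    from struct import Struct
    import hashlib

    Header = Struct('!L12sL4s')  # magic, command, payload length, checksum

    def parsePacket(packet):
        magic, command, payloadLength, checksum = Header.unpack(packet[:Header.size])
        payload = packet[Header.size:Header.size + payloadLength]
        if magic != 0xE9BEB4D9 or checksum != hashlib.sha512(payload).digest()[0:4]:
            raise ValueError('bad magic or checksum')
        return command.rstrip('\x00'), payload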
@@ -137,7 +139,7 @@ def encodeHost(host):

 def assembleVersionMessage(remoteHost, remotePort, myStreamNumber):
     payload = ''
-    payload += pack('>L', 2) # protocol version.
+    payload += pack('>L', 3) # protocol version.
     payload += pack('>q', 1) # bitflags of the services I offer.
     payload += pack('>q', int(time.time()))

@@ -163,6 +165,15 @@ def assembleVersionMessage(remoteHost, remotePort, myStreamNumber):

     return CreatePacket('version', payload)

+def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''):
+    payload = encodeVarint(fatal)
+    payload += encodeVarint(banTime)
+    payload += encodeVarint(len(inventoryVector))
+    payload += inventoryVector
+    payload += encodeVarint(len(errorText))
+    payload += errorText
+    return CreatePacket('error', payload)
+
 def lookupAppdataFolder():
     APPNAME = "PyBitmessage"
     if "BITMESSAGE_HOME" in environ:
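Note (illustrative, not part of the commit): the new assembleErrorMessage builds the protocol v3 'error' command as varint(fatal) + varint(banTime) + varint(len(inventoryVector)) + inventoryVector + varint(len(errorText)) + errorText. A usage sketch; the field values and the severity interpretation below are assumptions, not taken from this commit:

    packet = assembleErrorMessage(
        fatal=2,      # assumed severity code meaning the sender will disconnect
        banTime=500,  # suggested seconds to wait before reconnecting
        errorText='Too many connections from your IP. Closing connection.')
    # someSocket.sendall(packet)  # hypothetical send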
@@ -314,17 +325,20 @@ def reloadBroadcastSendersForWhichImWatching():
         privEncryptionKey = doubleHashOfAddressData[:32]
         MyECSubscriptionCryptorObjects[tag] = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))

-def isProofOfWorkSufficient(
-    data,
-    nonceTrialsPerByte=0,
-    payloadLengthExtraBytes=0):
+def isProofOfWorkSufficient(data,
+                            nonceTrialsPerByte=0,
+                            payloadLengthExtraBytes=0):
     if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
         nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
     if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
         payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
+    endOfLifeTime, = unpack('>Q', data[8:16])
+    TTL = endOfLifeTime - int(time.time())
+    if TTL < 300:
+        TTL = 300
     POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
         :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
-    return POW <= 2 ** 64 / ((len(data) + payloadLengthExtraBytes) * (nonceTrialsPerByte))
+    return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))

 def doCleanShutdown():
     global shutdown
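Note (illustrative, not part of the commit): under the v3 formula above the target shrinks as the object's time-to-live grows, so longer-lived objects demand more work. A worked sketch with example numbers (1000/1000 are the new network defaults):

    nonceTrialsPerByte = 1000          # new network default
    payloadLengthExtraBytes = 1000     # new network default
    payloadLength = 1024               # example object length in bytes, nonce included
    TTL = 4 * 24 * 60 * 60             # example: expires four days from now
    target = 2 ** 64 / (nonceTrialsPerByte * (payloadLength + payloadLengthExtraBytes +
        ((TTL * (payloadLength + payloadLengthExtraBytes)) / (2 ** 16))))
    # A nonce is acceptable when its double-SHA512 trial value is <= target, which takes
    # about 2 ** 64 / target hash attempts on average (roughly 12.7 million here).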
@@ -371,7 +385,7 @@ def doCleanShutdown():
     logger.info('Clean shutdown complete.')
     os._exit(0)

-# When you want to command a sendDataThread to do something, like shutdown or send some data, this
+# If you want to command all of the sendDataThreads to do something, like shutdown or send some data, this
 # function puts your data into the queues for each of the sendDataThreads. The sendDataThreads are
 # responsible for putting their queue into (and out of) the sendDataQueues list.
 def broadcastToSendDataQueues(data):
@@ -383,9 +397,9 @@ def flushInventory():
     #Note that the singleCleanerThread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary Now.
     with SqlBulkExecute() as sql:
         for hash, storedValue in inventory.items():
-            objectType, streamNumber, payload, receivedTime, tag = storedValue
+            objectType, streamNumber, payload, expiresTime, tag = storedValue
             sql.execute('''INSERT INTO inventory VALUES (?,?,?,?,?,?)''',
-                       hash,objectType,streamNumber,payload,receivedTime,tag)
+                       hash,objectType,streamNumber,payload,expiresTime,tag)
             del inventory[hash]

 def fixPotentiallyInvalidUTF8Data(text):
@@ -455,115 +469,223 @@ def isBitSetWithinBitfield(fourByteString, n):
     x, = unpack('>L', fourByteString)
     return x & 2**n != 0

-def decryptAndCheckPubkeyPayload(payload, address):
-    status, addressVersion, streamNumber, ripe = decodeAddress(address)
-    doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
-        addressVersion) + encodeVarint(streamNumber) + ripe).digest()).digest()
-    readPosition = 8 # bypass the nonce
-    readPosition += 8 # bypass the time
-    embeddedVersionNumber, varintLength = decodeVarint(
-        payload[readPosition:readPosition + 10])
-    if embeddedVersionNumber != addressVersion:
-        logger.info('Pubkey decryption was UNsuccessful due to address version mismatch. This shouldn\'t have happened.')
-        return 'failed'
-    readPosition += varintLength
-    embeddedStreamNumber, varintLength = decodeVarint(
-        payload[readPosition:readPosition + 10])
-    if embeddedStreamNumber != streamNumber:
-        logger.info('Pubkey decryption was UNsuccessful due to stream number mismatch. This shouldn\'t have happened.')
-        return 'failed'
-    readPosition += varintLength
-    signedData = payload[8:readPosition] # Some of the signed data is not encrypted so let's keep it for now.
-    toTag = payload[readPosition:readPosition+32]
-    readPosition += 32 #for the tag
-    encryptedData = payload[readPosition:]
-    # Let us try to decrypt the pubkey
-    privEncryptionKey = doubleHashOfAddressData[:32]
-    cryptorObject = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))
-    try:
-        decryptedData = cryptorObject.decrypt(encryptedData)
-    except:
-        # Someone must have encrypted some data with a different key
-        # but tagged it with a tag for which we are watching.
-        logger.info('Pubkey decryption was UNsuccessful. This shouldn\'t have happened.')
-        return 'failed'
-    logger.debug('Pubkey decryption successful')
-    readPosition = 4 # bypass the behavior bitfield
-    publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
-    # Is it possible for a public key to be invalid such that trying to
-    # encrypt or check a sig with it will cause an error? If it is, we should
-    # probably test these keys here.
-    readPosition += 64
-    publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
-    readPosition += 64
-    specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
-        decryptedData[readPosition:readPosition + 10])
-    readPosition += specifiedNonceTrialsPerByteLength
-    specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
-        decryptedData[readPosition:readPosition + 10])
-    readPosition += specifiedPayloadLengthExtraBytesLength
-    signedData += decryptedData[:readPosition]
-    signatureLength, signatureLengthLength = decodeVarint(
-        decryptedData[readPosition:readPosition + 10])
-    readPosition += signatureLengthLength
-    signature = decryptedData[readPosition:readPosition + signatureLength]
-    try:
-        if not highlevelcrypto.verify(signedData, signature, publicSigningKey.encode('hex')):
-            logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload).')
-            return 'failed'
-        logger.debug('ECDSA verify passed (within decryptAndCheckPubkeyPayload)')
-    except Exception as err:
-        logger.debug('ECDSA verify failed (within decryptAndCheckPubkeyPayload) %s' % err)
-        return 'failed'
-
-    sha = hashlib.new('sha512')
-    sha.update(publicSigningKey + publicEncryptionKey)
-    ripeHasher = hashlib.new('ripemd160')
-    ripeHasher.update(sha.digest())
-    embeddedRipe = ripeHasher.digest()
-
-    if embeddedRipe != ripe:
-        # Although this pubkey object had the tag were were looking for and was
-        # encrypted with the correct encryption key, it doesn't contain the
-        # correct keys. Someone is either being malicious or using buggy software.
-        logger.info('Pubkey decryption was UNsuccessful due to RIPE mismatch. This shouldn\'t have happened.')
-        return 'failed'
-
-    t = (ripe, addressVersion, signedData, int(time.time()), 'yes')
-    sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
-    return 'successful'
+def decryptAndCheckPubkeyPayload(data, address):
+    """
+    With the changes in protocol v3, to maintain backwards compatibility, signatures will be sent
+    the 'old' way during an upgrade period and then a 'new' simpler way after that. We will therefore
+    check the sig both ways.
+    Old way:
+    signedData = timePubkeyWasSigned(8 bytes) + addressVersion + streamNumber + the decrypted data down through the payloadLengthExtraBytes
+    New way:
+    signedData = all of the payload data from the time to the tag + the decrypted data down through the payloadLengthExtraBytes
+
+    The timePubkeyWasSigned will be calculated by subtracting 28 days form the embedded expiresTime.
+    """
+    """
+    The time, address version, and stream number are not encrypted so let's
+    keep that data here for now.
+    """
+    try:
+        status, addressVersion, streamNumber, ripe = decodeAddress(address)
+
+        readPosition = 20 # bypass the nonce, time, and object type
+        embeddedAddressVersion, varintLength = decodeVarint(data[readPosition:readPosition + 10])
+        readPosition += varintLength
+        embeddedStreamNumber, varintLength = decodeVarint(data[readPosition:readPosition + 10])
+        readPosition += varintLength
+
+        if addressVersion != embeddedAddressVersion:
+            logger.info('Pubkey decryption was UNsuccessful due to address version mismatch.')
+            return 'failed'
+        if streamNumber != embeddedStreamNumber:
+            logger.info('Pubkey decryption was UNsuccessful due to stream number mismatch.')
+            return 'failed'
+
+        expiresTime, = unpack('>Q', data[8:16])
+        TTL = 28 * 24 * 60 * 60
+        signedDataOldMethod = pack('>Q', (expiresTime - TTL)) # the time that the pubkey was signed. 8 bytes.
+        signedDataOldMethod += data[20:readPosition] # the address version and stream number
+
+        tag = data[readPosition:readPosition + 32]
+        readPosition += 32
+
+        signedDataNewMethod = data[8:readPosition] # the time through the tag
+
+        encryptedData = data[readPosition:]
+
+        # Let us try to decrypt the pubkey
+        toAddress, cryptorObject = shared.neededPubkeys[tag]
+        if toAddress != address:
+            logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
+            # the only way I can think that this could happen is if someone encodes their address data two different ways.
+            # That sort of address-malleability should have been prevented earlier.
+            return 'failed'
+        try:
+            decryptedData = cryptorObject.decrypt(encryptedData)
+        except:
+            # Someone must have encrypted some data with a different key
+            # but tagged it with a tag for which we are watching.
+            logger.info('Pubkey decryption was unsuccessful.')
+            return 'failed'
+
+        readPosition = 0
+        bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
+        readPosition += 4
+        publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
+        readPosition += 64
+        publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
+        readPosition += 64
+        specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
+            decryptedData[readPosition:readPosition + 10])
+        readPosition += specifiedNonceTrialsPerByteLength
+        specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
+            decryptedData[readPosition:readPosition + 10])
+        readPosition += specifiedPayloadLengthExtraBytesLength
+        signedDataOldMethod += decryptedData[:readPosition]
+        signedDataNewMethod += decryptedData[:readPosition]
+        signatureLength, signatureLengthLength = decodeVarint(
+            decryptedData[readPosition:readPosition + 10])
+        readPosition += signatureLengthLength
+        signature = decryptedData[readPosition:readPosition + signatureLength]
+
+        if highlevelcrypto.verify(signedDataOldMethod, signature, publicSigningKey.encode('hex')):
+            logger.info('ECDSA verify passed (within decryptAndCheckPubkeyPayload, old method)')
+        else:
+            logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload, old method)')
+            # Try the protocol v3 signing method
+            if highlevelcrypto.verify(signedDataNewMethod, signature, publicSigningKey.encode('hex')):
+                logger.info('ECDSA verify passed (within decryptAndCheckPubkeyPayload, new method)')
+            else:
+                logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload, new method)')
+                return 'failed'
+
+        sha = hashlib.new('sha512')
+        sha.update(publicSigningKey + publicEncryptionKey)
+        ripeHasher = hashlib.new('ripemd160')
+        ripeHasher.update(sha.digest())
+        embeddedRipe = ripeHasher.digest()
+
+        if embeddedRipe != ripe:
+            # Although this pubkey object had the tag were were looking for and was
+            # encrypted with the correct encryption key, it doesn't contain the
+            # correct keys. Someone is either being malicious or using buggy software.
+            logger.info('Pubkey decryption was UNsuccessful due to RIPE mismatch.')
+            return 'failed'
+
+        # Everything checked out. Insert it into the pubkeys table.
+
+        logger.info('within decryptAndCheckPubkeyPayload, addressVersion: %s, streamNumber: %s \n\
+            ripe %s\n\
+            publicSigningKey in hex: %s\n\
+            publicEncryptionKey in hex: %s' % (addressVersion,
+                                               streamNumber,
+                                               ripe.encode('hex'),
+                                               publicSigningKey.encode('hex'),
+                                               publicEncryptionKey.encode('hex')
+                                               )
+            )
+
+        t = (ripe, addressVersion, signedDataOldMethod, int(time.time()), 'yes')
+        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
+        return 'successful'
+    except varintDecodeError as e:
+        logger.info('Pubkey decryption was UNsuccessful due to a malformed varint.')
+        return 'failed'
+    except Exception as e:
+        logger.critical('Pubkey decryption was UNsuccessful because of an unhandled exception! This is definitely a bug! \n%s' % traceback.format_exc())
+        return 'failed'

 Peer = collections.namedtuple('Peer', ['host', 'port'])

-def checkAndShareMsgWithPeers(data):
+def checkAndShareObjectWithPeers(data):
+    """
+    This function is called after either receiving an object off of the wire
+    or after receiving one as ackdata.
+    Returns the length of time that we should reserve to process this message
+    if we are receiving it off of the wire.
+    """
+    if len(data) > 2 ** 18:
+        logger.info('The payload length of this object is too large (%s bytes). Ignoring it.' % len(data))
+        return
     # Let us check to make sure that the proof of work is sufficient.
     if not isProofOfWorkSufficient(data):
-        logger.debug('Proof of work in msg message insufficient.')
-        return
+        logger.info('Proof of work is insufficient.')
+        return 0
+
+    endOfLifeTime, = unpack('>Q', data[8:16])
+    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800: # The TTL may not be larger than 28 days + 3 hours of wiggle room
+        logger.info('This object\'s End of Life time is too far in the future. Ignoring it. Time is %s' % endOfLifeTime)
+        return 0
+    if endOfLifeTime - int(time.time()) < - 3600: # The EOL time was more than an hour ago. That's too much.
+        logger.info('This object\'s End of Life time was more than an hour ago. Ignoring the object. Time is %s' % endOfLifeTime)
+        return 0
+    intObjectType, = unpack('>I', data[16:20])
+    try:
+        if intObjectType == 0:
+            _checkAndShareGetpubkeyWithPeers(data)
+            return 0.1
+        elif intObjectType == 1:
+            _checkAndSharePubkeyWithPeers(data)
+            return 0.1
+        elif intObjectType == 2:
+            _checkAndShareMsgWithPeers(data)
+            return 0.6
+        elif intObjectType == 3:
+            _checkAndShareBroadcastWithPeers(data)
+            return 0.6
+        else:
+            _checkAndShareUndefinedObjectWithPeers(data)
+            return 0.6
+    except varintDecodeError as e:
+        logger.debug("There was a problem with a varint while checking to see whether it was appropriate to share an object with peers. Some details: %s" % e)
+    except Exception as e:
+        logger.critical('There was a problem while checking to see whether it was appropriate to share an object with peers. This is definitely a bug! \n%s' % traceback.format_exc())
+    return 0
+
-    readPosition = 8
-    embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-    # This section is used for the transition from 32 bit time to 64 bit
-    # time in the protocol.
-    if embeddedTime == 0:
-        embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-        readPosition += 8
-    else:
-        readPosition += 4
-
-    if embeddedTime > (int(time.time()) + 10800):
-        logger.debug('The embedded time in this msg message is more than three hours in the future. That doesn\'t make sense. Ignoring message.')
-        return
-    if embeddedTime < (int(time.time()) - maximumAgeOfAnObjectThatIAmWillingToAccept):
-        logger.debug('The embedded time in this msg message is too old. Ignoring message.')
-        return
-    streamNumberAsClaimedByMsg, streamNumberAsClaimedByMsgLength = decodeVarint(
-        data[readPosition:readPosition + 9])
-    if not streamNumberAsClaimedByMsg in streamsInWhichIAmParticipating:
-        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumberAsClaimedByMsg)
-        return
-    readPosition += streamNumberAsClaimedByMsgLength
+def _checkAndShareUndefinedObjectWithPeers(data):
+    embeddedTime, = unpack('>Q', data[8:16])
+    readPosition = 20 # bypass nonce, time, and object type
+    objectVersion, objectVersionLength = decodeVarint(
+        data[readPosition:readPosition + 9])
+    readPosition += objectVersionLength
+    streamNumber, streamNumberLength = decodeVarint(
+        data[readPosition:readPosition + 9])
+    if not streamNumber in streamsInWhichIAmParticipating:
+        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
+        return
+
+    inventoryHash = calculateInventoryHash(data)
+    shared.numberOfInventoryLookupsPerformed += 1
+    inventoryLock.acquire()
+    if inventoryHash in inventory:
+        logger.debug('We have already received this undefined object. Ignoring.')
+        inventoryLock.release()
+        return
+    elif isInSqlInventory(inventoryHash):
+        logger.debug('We have already received this undefined object (it is stored on disk in the SQL inventory). Ignoring it.')
+        inventoryLock.release()
+        return
+    objectType, = unpack('>I', data[16:20])
+    inventory[inventoryHash] = (
+        objectType, streamNumber, data, embeddedTime,'')
+    inventorySets[streamNumber].add(inventoryHash)
+    inventoryLock.release()
+    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
+    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
+
+def _checkAndShareMsgWithPeers(data):
+    embeddedTime, = unpack('>Q', data[8:16])
+    readPosition = 20 # bypass nonce, time, and object type
+    streamNumber, streamNumberLength = decodeVarint(
+        data[readPosition:readPosition + 9])
+    if not streamNumber in streamsInWhichIAmParticipating:
+        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
+        return
+    readPosition += streamNumberLength
     inventoryHash = calculateInventoryHash(data)
     shared.numberOfInventoryLookupsPerformed += 1
     inventoryLock.acquire()
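Note (illustrative, not part of the commit): checkAndShareObjectWithPeers above relies on the fixed-size part of every protocol v3 object -- an 8-byte nonce, an 8-byte big-endian expiration time, and a 4-byte object type -- ahead of the varint-encoded version and stream number. A small sketch of that header read (peekObjectHeader is a hypothetical helper):

    from struct import unpack

    def peekObjectHeader(data):
        nonce = data[0:8]
        expiresTime, = unpack('>Q', data[8:16])  # end-of-life timestamp checked above
        objectType, = unpack('>I', data[16:20])  # 0 getpubkey, 1 pubkey, 2 msg, 3 broadcast
        return nonce, expiresTime, objectType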
@@ -576,13 +698,13 @@ def checkAndShareMsgWithPeers(data):
         inventoryLock.release()
         return
     # This msg message is valid. Let's let our peers know about it.
-    objectType = 'msg'
+    objectType = 2
     inventory[inventoryHash] = (
-        objectType, streamNumberAsClaimedByMsg, data, embeddedTime,'')
+        objectType, streamNumber, data, embeddedTime,'')
-    inventorySets[streamNumberAsClaimedByMsg].add(inventoryHash)
+    inventorySets[streamNumber].add(inventoryHash)
     inventoryLock.release()
     logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
-    broadcastToSendDataQueues((streamNumberAsClaimedByMsg, 'advertiseobject', inventoryHash))
+    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))

     # Now let's enqueue it to be processed ourselves.
     # If we already have too much data in the queue to be processed, just sleep for now.
|
||||||
shared.objectProcessorQueueSize += len(data)
|
shared.objectProcessorQueueSize += len(data)
|
||||||
objectProcessorQueue.put((objectType,data))
|
objectProcessorQueue.put((objectType,data))
|
||||||
|
|
||||||
def checkAndSharegetpubkeyWithPeers(data):
|
def _checkAndShareGetpubkeyWithPeers(data):
|
||||||
if not isProofOfWorkSufficient(data):
|
if len(data) < 42:
|
||||||
logger.debug('Proof of work in getpubkey message insufficient.')
|
logger.info('getpubkey message doesn\'t contain enough data. Ignoring.')
|
||||||
return
|
|
||||||
if len(data) < 34:
|
|
||||||
logger.debug('getpubkey message doesn\'t contain enough data. Ignoring.')
|
|
||||||
return
|
|
||||||
readPosition = 8 # bypass the nonce
|
|
||||||
embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
|
|
||||||
|
|
||||||
# This section is used for the transition from 32 bit time to 64 bit
|
|
||||||
# time in the protocol.
|
|
||||||
if embeddedTime == 0:
|
|
||||||
embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
|
|
||||||
readPosition += 8
|
|
||||||
else:
|
|
||||||
readPosition += 4
|
|
||||||
|
|
||||||
if embeddedTime > int(time.time()) + 10800:
|
|
||||||
logger.debug('The time in this getpubkey message is too new. Ignoring it. Time: %s' % embeddedTime)
|
|
||||||
return
|
|
||||||
if embeddedTime < int(time.time()) - maximumAgeOfAnObjectThatIAmWillingToAccept:
|
|
||||||
logger.debug('The time in this getpubkey message is too old. Ignoring it. Time: %s' % embeddedTime)
|
|
||||||
return
|
return
|
||||||
|
if len(data) > 200:
|
||||||
|
logger.info('getpubkey is abnormally long. Sanity check failed. Ignoring object.')
|
||||||
|
embeddedTime, = unpack('>Q', data[8:16])
|
||||||
|
readPosition = 20 # bypass the nonce, time, and object type
|
||||||
requestedAddressVersionNumber, addressVersionLength = decodeVarint(
|
requestedAddressVersionNumber, addressVersionLength = decodeVarint(
|
||||||
data[readPosition:readPosition + 10])
|
data[readPosition:readPosition + 10])
|
||||||
readPosition += addressVersionLength
|
readPosition += addressVersionLength
|
||||||
|
@@ -638,7 +744,7 @@ def checkAndSharegetpubkeyWithPeers(data):
         inventoryLock.release()
         return

-    objectType = 'getpubkey'
+    objectType = 0
     inventory[inventoryHash] = (
         objectType, streamNumber, data, embeddedTime,'')
     inventorySets[streamNumber].add(inventoryHash)
@@ -655,31 +761,11 @@ def checkAndSharegetpubkeyWithPeers(data):
     shared.objectProcessorQueueSize += len(data)
     objectProcessorQueue.put((objectType,data))

-def checkAndSharePubkeyWithPeers(data):
-    if len(data) < 146 or len(data) > 420: # sanity check
-        return
-    # Let us make sure that the proof of work is sufficient.
-    if not isProofOfWorkSufficient(data):
-        logger.debug('Proof of work in pubkey message insufficient.')
-        return
-
-    readPosition = 8 # for the nonce
-    embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-    # This section is used for the transition from 32 bit time to 64 bit
-    # time in the protocol.
-    if embeddedTime == 0:
-        embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-        readPosition += 8
-    else:
-        readPosition += 4
-
-    if embeddedTime < int(time.time()) - lengthOfTimeToHoldOnToAllPubkeys:
-        logger.debug('The embedded time in this pubkey message is too old. Ignoring. Embedded time is: %s' % embeddedTime)
-        return
-    if embeddedTime > int(time.time()) + 10800:
-        logger.debug('The embedded time in this pubkey message more than several hours in the future. This is irrational. Ignoring message.')
-        return
+def _checkAndSharePubkeyWithPeers(data):
+    if len(data) < 146 or len(data) > 440: # sanity check
+        return
+    embeddedTime, = unpack('>Q', data[8:16])
+    readPosition = 20 # bypass the nonce, time, and object type
     addressVersion, varintLength = decodeVarint(
         data[readPosition:readPosition + 10])
     readPosition += varintLength
@@ -706,7 +792,7 @@ def checkAndSharePubkeyWithPeers(data):
         logger.debug('We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.')
         inventoryLock.release()
         return
-    objectType = 'pubkey'
+    objectType = 1
     inventory[inventoryHash] = (
         objectType, streamNumber, data, embeddedTime, tag)
     inventorySets[streamNumber].add(inventoryHash)
@@ -725,31 +811,12 @@ def checkAndSharePubkeyWithPeers(data):
     objectProcessorQueue.put((objectType,data))


-def checkAndShareBroadcastWithPeers(data):
-    # Let us verify that the proof of work is sufficient.
-    if not isProofOfWorkSufficient(data):
-        logger.debug('Proof of work in broadcast message insufficient.')
-        return
-    readPosition = 8 # bypass the nonce
-    embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
-
-    # This section is used for the transition from 32 bit time to 64 bit
-    # time in the protocol.
-    if embeddedTime == 0:
-        embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
-        readPosition += 8
-    else:
-        readPosition += 4
-
-    if embeddedTime > (int(time.time()) + 10800): # prevent funny business
-        logger.debug('The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.')
-        return
-    if embeddedTime < (int(time.time()) - maximumAgeOfAnObjectThatIAmWillingToAccept):
-        logger.debug('The embedded time in this broadcast message is too old. Ignoring message.')
-        return
+def _checkAndShareBroadcastWithPeers(data):
     if len(data) < 180:
         logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
         return
+    embeddedTime, = unpack('>Q', data[8:16])
+    readPosition = 20 # bypass the nonce, time, and object type
     broadcastVersion, broadcastVersionLength = decodeVarint(
         data[readPosition:readPosition + 10])
     readPosition += broadcastVersionLength
@@ -775,7 +842,7 @@ def checkAndShareBroadcastWithPeers(data):
         inventoryLock.release()
         return
     # It is valid. Let's let our peers know about it.
-    objectType = 'broadcast'
+    objectType = 3
     inventory[inventoryHash] = (
         objectType, streamNumber, data, embeddedTime, tag)
     inventorySets[streamNumber].add(inventoryHash)
@@ -792,6 +859,4 @@ def checkAndShareBroadcastWithPeers(data):
     shared.objectProcessorQueueSize += len(data)
     objectProcessorQueue.put((objectType,data))

-
-helper_startup.loadConfig()
 from debug import logger