The idea behind this fix is to prioritize the outgoing object inventory by proof-of-work strength, and to calculate at least as much proof of work on the messages we originate so that they never have much less than average priority. To achieve this goal, the following changes were made:

-arithmetic.py-
Defines isqrt function to calculate integer square roots of large integers.
-shared.py-
Defines averageNonceTrialsPerByteActual, countNonceTrialsPerByteActual, and the addInventory function (to calculate these values and add objects to inventory). Code updated to call addInventory when adding objects to inventory.
-helper_startup.py-
Adds 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' to newly generated keys.dat files.
-class_sqlThread.py-
Reads 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' from keys.dat (or adds them if they don't exist) and resets 'countnoncetrialsperbyteactual' to its square root (with arithmetic.isqrt) when vacuuming messages.dat.
-class_singleCleaner.py-
Writes 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' to disk when the sql database is written to disk.
-class_singleWorker.py-
Defines prioritizeTarget function (to calculate POW targets with a minimum of averageNonceTrialsPerByteActual, for large inventories, and averageNonceTrialsPerByteActual/2, for small inventories). Code updated to call shared.addInventory when adding objects to inventory and to call prioritizeTarget as the last step when calculating targets.
-class_receiveDataThread-
Sorts outgoing inventory by decreasing nonceTrialsPerByteActual (i.e. strongest proof of work first).

Warning: "Max acceptable difficulty" settings are not implemented as a method for limiting the POW specified by the new parameters. We will not be implementing them, but will instead focus on porting the spam fix to the upcoming V3 protocol.
This commit is contained in:
Your Name 2014-09-24 21:17:12 +00:00
parent 13db5fe00c
commit 533e2dd99b
7 changed files with 87 additions and 38 deletions

View File

@ -276,15 +276,17 @@ class receiveDataThread(threading.Thread):
# Select all hashes which are younger than two days old and in this # Select all hashes which are younger than two days old and in this
# stream. # stream.
queryreturn = sqlQuery( queryreturn = sqlQuery(
'''SELECT hash FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''', '''SELECT hash, payload FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''',
int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers, int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers,
int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys, int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys,
self.streamNumber) self.streamNumber)
bigInvList = {} bigInvList = {}
for row in queryreturn: for row in queryreturn:
hash, = row hash, payload = row
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware: if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
bigInvList[hash] = 0 POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(payload[
:8] + hashlib.sha512(payload[8:]).digest()).digest()).digest()[0:8]) #calculate POW
bigInvList[hash] = (2 ** 64)/(POW*len(payload)) #calculate nonceTrialsPerByteActual
# We also have messages in our inventory in memory (which is a python # We also have messages in our inventory in memory (which is a python
# dictionary). Let's fetch those too. # dictionary). Let's fetch those too.
with shared.inventoryLock: with shared.inventoryLock:
@ -292,12 +294,14 @@ class receiveDataThread(threading.Thread):
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware: if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
objectType, streamNumber, payload, receivedTime, tag = storedValue objectType, streamNumber, payload, receivedTime, tag = storedValue
if streamNumber == self.streamNumber and receivedTime > int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers: if streamNumber == self.streamNumber and receivedTime > int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers:
bigInvList[hash] = 0 POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(payload[
:8] + hashlib.sha512(payload[8:]).digest()).digest()).digest()[0:8]) #calculate POW
bigInvList[hash] = (2 ** 64)/(POW*len(payload)) #calculate nonceTrialsPerByteActual
numberOfObjectsInInvMessage = 0 numberOfObjectsInInvMessage = 0
payload = '' payload = ''
# Now let us start appending all of these hashes together. They will be # Now let us start appending all of these hashes together. They will be sent out in a
# sent out in a big inv message to our new peer. # big inv message to our new peer with the stronger-for-their-length POW objects first
for hash, storedValue in bigInvList.items(): for hash in sorted(bigInvList, key=bigInvList.__getitem__, reverse=True):
payload += hash payload += hash
numberOfObjectsInInvMessage += 1 numberOfObjectsInInvMessage += 1
if numberOfObjectsInInvMessage == 50000: # We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages. if numberOfObjectsInInvMessage == 50000: # We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.

View File

@ -56,6 +56,10 @@ class singleCleaner(threading.Thread):
receivedTime, receivedTime,
tag) tag)
del shared.inventory[hash] del shared.inventory[hash]
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
shared.UISignalQueue.put(('updateStatusBar', '')) shared.UISignalQueue.put(('updateStatusBar', ''))
shared.broadcastToSendDataQueues(( shared.broadcastToSendDataQueues((
0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes. 0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.

View File

@ -130,6 +130,7 @@ class singleWorker(threading.Thread):
# Do the POW for this pubkey message # Do the POW for this pubkey message
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes + target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
self.prioritizeTarget(target, payload)
print '(For pubkey message) Doing proof of work...' print '(For pubkey message) Doing proof of work...'
initialHash = hashlib.sha512(payload).digest() initialHash = hashlib.sha512(payload).digest()
trialValue, nonce = proofofwork.run(target, initialHash) trialValue, nonce = proofofwork.run(target, initialHash)
@ -138,9 +139,7 @@ class singleWorker(threading.Thread):
inventoryHash = calculateInventoryHash(payload) inventoryHash = calculateInventoryHash(payload)
objectType = 'pubkey' objectType = 'pubkey'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, '')
objectType, streamNumber, payload, embeddedTime,'')
shared.inventorySets[streamNumber].add(inventoryHash)
with shared.printLock: with shared.printLock:
print 'broadcasting inv with hash:', inventoryHash.encode('hex') print 'broadcasting inv with hash:', inventoryHash.encode('hex')
@ -216,6 +215,7 @@ class singleWorker(threading.Thread):
# Do the POW for this pubkey message # Do the POW for this pubkey message
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes + target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
self.prioritizeTarget(target, payload)
print '(For pubkey message) Doing proof of work...' print '(For pubkey message) Doing proof of work...'
initialHash = hashlib.sha512(payload).digest() initialHash = hashlib.sha512(payload).digest()
trialValue, nonce = proofofwork.run(target, initialHash) trialValue, nonce = proofofwork.run(target, initialHash)
@ -224,9 +224,7 @@ class singleWorker(threading.Thread):
payload = pack('>Q', nonce) + payload payload = pack('>Q', nonce) + payload
inventoryHash = calculateInventoryHash(payload) inventoryHash = calculateInventoryHash(payload)
objectType = 'pubkey' objectType = 'pubkey'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, '')
objectType, streamNumber, payload, embeddedTime,'')
shared.inventorySets[streamNumber].add(inventoryHash)
with shared.printLock: with shared.printLock:
print 'broadcasting inv with hash:', inventoryHash.encode('hex') print 'broadcasting inv with hash:', inventoryHash.encode('hex')
@ -312,6 +310,7 @@ class singleWorker(threading.Thread):
# Do the POW for this pubkey message # Do the POW for this pubkey message
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes + target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
self.prioritizeTarget(target, payload)
print '(For pubkey message) Doing proof of work...' print '(For pubkey message) Doing proof of work...'
initialHash = hashlib.sha512(payload).digest() initialHash = hashlib.sha512(payload).digest()
trialValue, nonce = proofofwork.run(target, initialHash) trialValue, nonce = proofofwork.run(target, initialHash)
@ -320,9 +319,7 @@ class singleWorker(threading.Thread):
payload = pack('>Q', nonce) + payload payload = pack('>Q', nonce) + payload
inventoryHash = calculateInventoryHash(payload) inventoryHash = calculateInventoryHash(payload)
objectType = 'pubkey' objectType = 'pubkey'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, doubleHashOfAddressData[32:])
objectType, streamNumber, payload, embeddedTime, doubleHashOfAddressData[32:])
shared.inventorySets[streamNumber].add(inventoryHash)
with shared.printLock: with shared.printLock:
print 'broadcasting inv with hash:', inventoryHash.encode('hex') print 'broadcasting inv with hash:', inventoryHash.encode('hex')
@ -422,6 +419,8 @@ class singleWorker(threading.Thread):
target = 2 ** 64 / ((len( target = 2 ** 64 / ((len(
payload) + shared.networkDefaultPayloadLengthExtraBytes + 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) payload) + shared.networkDefaultPayloadLengthExtraBytes + 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
if ( target > (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)):
target = (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)
print '(For broadcast message) Doing proof of work...' print '(For broadcast message) Doing proof of work...'
shared.UISignalQueue.put(('updateSentItemStatusByAckdata', ( shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
ackdata, tr.translateText("MainWindow", "Doing work necessary to send broadcast...")))) ackdata, tr.translateText("MainWindow", "Doing work necessary to send broadcast..."))))
@ -433,9 +432,7 @@ class singleWorker(threading.Thread):
inventoryHash = calculateInventoryHash(payload) inventoryHash = calculateInventoryHash(payload)
objectType = 'broadcast' objectType = 'broadcast'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, streamNumber, payload, int(time.time()), tag)
objectType, streamNumber, payload, int(time.time()),tag)
shared.inventorySets[streamNumber].add(inventoryHash)
with shared.printLock: with shared.printLock:
print 'sending inv (within sendBroadcast function) for object:', inventoryHash.encode('hex') print 'sending inv (within sendBroadcast function) for object:', inventoryHash.encode('hex')
shared.broadcastToSendDataQueues(( shared.broadcastToSendDataQueues((
@ -807,6 +804,8 @@ class singleWorker(threading.Thread):
continue continue
encryptedPayload = embeddedTime + encodeVarint(toStreamNumber) + encrypted encryptedPayload = embeddedTime + encodeVarint(toStreamNumber) + encrypted
target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte) target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
if ( target > (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)):
target = (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)
with shared.printLock: with shared.printLock:
print '(For msg message) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes print '(For msg message) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes
@ -824,9 +823,7 @@ class singleWorker(threading.Thread):
inventoryHash = calculateInventoryHash(encryptedPayload) inventoryHash = calculateInventoryHash(encryptedPayload)
objectType = 'msg' objectType = 'msg'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, toStreamNumber, encryptedPayload, int(time.time()), '')
objectType, toStreamNumber, encryptedPayload, int(time.time()),'')
shared.inventorySets[toStreamNumber].add(inventoryHash)
if shared.config.has_section(toaddress): if shared.config.has_section(toaddress):
shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Message sent. Sent on %1").arg(l10n.formatTimestamp())))) shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Message sent. Sent on %1").arg(l10n.formatTimestamp()))))
else: else:
@ -902,6 +899,7 @@ class singleWorker(threading.Thread):
ripe, tr.translateText("MainWindow",'Doing work necessary to request encryption key.')))) ripe, tr.translateText("MainWindow",'Doing work necessary to request encryption key.'))))
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes + target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
self.prioritizeTarget(target, payload)
initialHash = hashlib.sha512(payload).digest() initialHash = hashlib.sha512(payload).digest()
trialValue, nonce = proofofwork.run(target, initialHash) trialValue, nonce = proofofwork.run(target, initialHash)
with shared.printLock: with shared.printLock:
@ -911,9 +909,7 @@ class singleWorker(threading.Thread):
payload = pack('>Q', nonce) + payload payload = pack('>Q', nonce) + payload
inventoryHash = calculateInventoryHash(payload) inventoryHash = calculateInventoryHash(payload)
objectType = 'getpubkey' objectType = 'getpubkey'
shared.inventory[inventoryHash] = ( shared.addInventory(inventoryHash, objectType, streamNumber, payload, int(time.time()), '')
objectType, streamNumber, payload, int(time.time()),'')
shared.inventorySets[streamNumber].add(inventoryHash)
print 'sending inv (for the getpubkey message)' print 'sending inv (for the getpubkey message)'
shared.broadcastToSendDataQueues(( shared.broadcastToSendDataQueues((
streamNumber, 'advertiseobject', inventoryHash)) streamNumber, 'advertiseobject', inventoryHash))
@ -932,6 +928,7 @@ class singleWorker(threading.Thread):
payload = embeddedTime + encodeVarint(toStreamNumber) + ackdata payload = embeddedTime + encodeVarint(toStreamNumber) + ackdata
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes + target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte) 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
self.prioritizeTarget(target, payload)
with shared.printLock: with shared.printLock:
print '(For ack message) Doing proof of work...' print '(For ack message) Doing proof of work...'
@ -947,3 +944,10 @@ class singleWorker(threading.Thread):
payload = pack('>Q', nonce) + payload payload = pack('>Q', nonce) + payload
return shared.CreatePacket('msg', payload) return shared.CreatePacket('msg', payload)
def prioritizeTarget(self, target, payload):
#set target to the inventory average of length adjusted targets for
#large inventories and twice the inventory average for small inventories
if ( target > (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)):
target = (1+1/shared.countNonceTrialsPerByteActual)*(2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)

View File

@ -10,6 +10,7 @@ from namecoin import ensureNamecoinOptions
import random import random
import string import string
import tr#anslate import tr#anslate
from pyelliptic import arithmetic
# This thread exists because SQLITE3 is so un-threadsafe that we must # This thread exists because SQLITE3 is so un-threadsafe that we must
# submit queries to it and it puts results back in a different queue. They # submit queries to it and it puts results back in a different queue. They
@ -303,6 +304,15 @@ class sqlThread(threading.Thread):
parameters = (6,) parameters = (6,)
self.cur.execute(item, parameters) self.cur.execute(item, parameters)
if shared.config.has_option('bitmessagesettings', 'averagenoncetrialsperbyteactual') and shared.config.has_option('bitmessagesettings', 'countnoncetrialsperbyteactual'):
shared.averageNonceTrialsPerByteActual = shared.config.getint('bitmessagesettings', 'averagenoncetrialsperbyteactual')
shared.countNonceTrialsPerByteActual = shared.config.getint('bitmessagesettings', 'countnoncetrialsperbyteactual')
else:
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
shared.config.write(configfile)
# Are you hoping to add a new option to the keys.dat file of existing # Are you hoping to add a new option to the keys.dat file of existing
# Bitmessage users? Add it right above this line! # Bitmessage users? Add it right above this line!
@ -345,6 +355,7 @@ class sqlThread(threading.Thread):
logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...') logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...')
try: try:
self.cur.execute( ''' VACUUM ''') self.cur.execute( ''' VACUUM ''')
shared.countNonceTrialsPerByteActual = arithmetic.isqrt(shared.countNonceTrialsPerByteActual)
except Exception as err: except Exception as err:
if str(err) == 'database or disk is full': if str(err) == 'database or disk is full':
logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.') logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
@ -419,6 +430,7 @@ class sqlThread(threading.Thread):
self.conn.commit() self.conn.commit()
try: try:
self.cur.execute( ''' VACUUM ''') self.cur.execute( ''' VACUUM ''')
shared.countNonceTrialsPerByteActual = arithmetic.isqrt(shared.countNonceTrialsPerByteActual)
except Exception as err: except Exception as err:
if str(err) == 'database or disk is full': if str(err) == 'database or disk is full':
logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.') logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.')

View File

@ -92,6 +92,8 @@ def loadConfig():
shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2)) shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str( shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
shared.networkDefaultPayloadLengthExtraBytes)) shared.networkDefaultPayloadLengthExtraBytes))
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
shared.config.set('bitmessagesettings', 'minimizeonclose', 'false') shared.config.set('bitmessagesettings', 'minimizeonclose', 'false')
shared.config.set( shared.config.set(
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0') 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')

View File

@ -70,6 +70,21 @@ def base10_multiply(a,n):
if (n%2) == 0: return base10_double(base10_multiply(a,n/2)) if (n%2) == 0: return base10_double(base10_multiply(a,n/2))
if (n%2) == 1: return base10_add(base10_double(base10_multiply(a,n/2)),a) if (n%2) == 1: return base10_add(base10_double(base10_multiply(a,n/2)),a)
def isqrt(x):
    """Return the floor of the square root of the non-negative integer x."""
    if x < 0:
        raise ValueError('square root not defined for negative numbers')
    value = int(x)
    if not value:
        return 0
    # Start from 2**ceil(bit_length/2), which is always >= the true root,
    # so Newton's iteration decreases monotonically until it converges.
    quot, rem = divmod(value.bit_length(), 2)
    guess = 1 << (quot + rem)
    while True:
        refined = (guess + value // guess) // 2
        if refined >= guess:
            return guess
        guess = refined
def hex_to_point(h): return (decode(h[2:66],16),decode(h[66:],16)) def hex_to_point(h): return (decode(h[2:66],16),decode(h[66:],16))
def point_to_hex(p): return '04'+encode(p[0],16,64)+encode(p[1],16,64) def point_to_hex(p): return '04'+encode(p[0],16,64)+encode(p[1],16,64)

View File

@ -85,6 +85,10 @@ streamsInWhichIAmParticipating = {}
networkDefaultProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work. networkDefaultProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
networkDefaultPayloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target. networkDefaultPayloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
#inventory average (and count) of nonce trials per byte of actual payload (excluding extra bytes)
averageNonceTrialsPerByteActual = (networkDefaultPayloadLengthExtraBytes * networkDefaultProofOfWorkNonceTrialsPerByte)/50
countNonceTrialsPerByteActual = 1
# Remember here the RPC port read from namecoin.conf so we can restore to # Remember here the RPC port read from namecoin.conf so we can restore to
# it as default whenever the user changes the "method" selection for # it as default whenever the user changes the "method" selection for
# namecoin integration to "namecoind". # namecoin integration to "namecoind".
@ -577,9 +581,7 @@ def checkAndShareMsgWithPeers(data):
return return
# This msg message is valid. Let's let our peers know about it. # This msg message is valid. Let's let our peers know about it.
objectType = 'msg' objectType = 'msg'
inventory[inventoryHash] = ( addInventory(inventoryHash, objectType, streamNumberAsClaimedByMsg, data, embeddedTime, '')
objectType, streamNumberAsClaimedByMsg, data, embeddedTime,'')
inventorySets[streamNumberAsClaimedByMsg].add(inventoryHash)
inventoryLock.release() inventoryLock.release()
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex')) logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
broadcastToSendDataQueues((streamNumberAsClaimedByMsg, 'advertiseobject', inventoryHash)) broadcastToSendDataQueues((streamNumberAsClaimedByMsg, 'advertiseobject', inventoryHash))
@ -639,9 +641,7 @@ def checkAndSharegetpubkeyWithPeers(data):
return return
objectType = 'getpubkey' objectType = 'getpubkey'
inventory[inventoryHash] = ( addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, '')
objectType, streamNumber, data, embeddedTime,'')
inventorySets[streamNumber].add(inventoryHash)
inventoryLock.release() inventoryLock.release()
# This getpubkey request is valid. Forward to peers. # This getpubkey request is valid. Forward to peers.
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex')) logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
@ -707,9 +707,7 @@ def checkAndSharePubkeyWithPeers(data):
inventoryLock.release() inventoryLock.release()
return return
objectType = 'pubkey' objectType = 'pubkey'
inventory[inventoryHash] = ( addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag)
objectType, streamNumber, data, embeddedTime, tag)
inventorySets[streamNumber].add(inventoryHash)
inventoryLock.release() inventoryLock.release()
# This object is valid. Forward it to peers. # This object is valid. Forward it to peers.
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex')) logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
@ -776,9 +774,7 @@ def checkAndShareBroadcastWithPeers(data):
return return
# It is valid. Let's let our peers know about it. # It is valid. Let's let our peers know about it.
objectType = 'broadcast' objectType = 'broadcast'
inventory[inventoryHash] = ( addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag)
objectType, streamNumber, data, embeddedTime, tag)
inventorySets[streamNumber].add(inventoryHash)
inventoryLock.release() inventoryLock.release()
# This object is valid. Forward it to peers. # This object is valid. Forward it to peers.
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex')) logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
@ -792,6 +788,18 @@ def checkAndShareBroadcastWithPeers(data):
shared.objectProcessorQueueSize += len(data) shared.objectProcessorQueueSize += len(data)
objectProcessorQueue.put((objectType,data)) objectProcessorQueue.put((objectType,data))
def addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag):
    """Store an object in the in-memory inventory and fold its proof of
    work into the running POW statistics used for prioritization.

    NOTE(review): the statistics updates below are not thread-safe on
    their own; the existing call sites in this module run under
    inventoryLock -- confirm any new caller does the same.
    """
    #add object to inventory
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    inventorySets[streamNumber].add(inventoryHash)
    #include object in POW statistics: the first 8 bytes of data are the
    #nonce, the remainder is the payload that was hashed for the POW
    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
        :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8]) #calculate POW
    #work performed per byte of actual payload (smaller POW => more work)
    nonceTrialsPerByteActual = (2 ** 64)/(POW*len(data)) #calculate nonceTrialsPerByteActual
    shared.countNonceTrialsPerByteActual += 1 #update count for average
    #incremental (integer) running mean of nonceTrialsPerByteActual
    shared.averageNonceTrialsPerByteActual += (nonceTrialsPerByteActual-shared.averageNonceTrialsPerByteActual)/shared.countNonceTrialsPerByteActual
helper_startup.loadConfig() helper_startup.loadConfig()
from debug import logger from debug import logger