The idea behind this fix is to prioritize the outgoing object inventory by proof of work strength and calculate at least as much proof of work on the messages we originate so they never have much less than average priority. To achieve this goal the following changes were made:
-arithmetic.py- Defines isqrt function to calculate integer square roots of large integers. -shared.py- Defines averageNonceTrialsPerByteActual, countNonceTrialsPerByteActual, and the addInventory function (to calculate these values and add objects to inventory). Code updated to call addInventory when adding objects to inventory. -helper_startup.py- Adds 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' to newly generated keys.dat files. -class_sqlThread.py- Reads 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' from keys.dat (or adds them if they don't exist) and resets 'countnoncetrialsperbyteactual' to its square root (with arithmetic.isqrt) when vacuuming messages.dat. -class_singleCleaner.py- Writes 'averagenoncetrialsperbyteactual' and 'countnoncetrialsperbyteactual' to disk when the sql database is written to disk. -class_singleWorker.py- Defines prioritizeTarget function (to calculate POW targets with a minimum of averageNonceTrialsPerByteActual, for large inventories, and averageNonceTrialsPerByteActual/2, for small inventories). Code updated to call shared.addInventory when adding objects to inventory and to call prioritizeTarget as the last step when calculating targets. -class_receiveDataThread.py- Sorts outgoing inventory by decreasing nonceTrialsPerByteActual (i.e. increasing proof strength). Warning: "Max acceptable difficulty" settings are not implemented as a method for limiting the POW specified by the new parameter. We will not be implementing it, but will instead focus on porting the spam fix to the upcoming V3 protocol.
This commit is contained in:
parent
13db5fe00c
commit
533e2dd99b
|
@ -276,15 +276,17 @@ class receiveDataThread(threading.Thread):
|
|||
# Select all hashes which are younger than two days old and in this
|
||||
# stream.
|
||||
queryreturn = sqlQuery(
|
||||
'''SELECT hash FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''',
|
||||
'''SELECT hash, payload FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''',
|
||||
int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers,
|
||||
int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys,
|
||||
self.streamNumber)
|
||||
bigInvList = {}
|
||||
for row in queryreturn:
|
||||
hash, = row
|
||||
hash, payload = row
|
||||
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
|
||||
bigInvList[hash] = 0
|
||||
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(payload[
|
||||
:8] + hashlib.sha512(payload[8:]).digest()).digest()).digest()[0:8]) #calculate POW
|
||||
bigInvList[hash] = (2 ** 64)/(POW*len(payload)) #calculate nonceTrialsPerByteActual
|
||||
# We also have messages in our inventory in memory (which is a python
|
||||
# dictionary). Let's fetch those too.
|
||||
with shared.inventoryLock:
|
||||
|
@ -292,12 +294,14 @@ class receiveDataThread(threading.Thread):
|
|||
if hash not in self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware:
|
||||
objectType, streamNumber, payload, receivedTime, tag = storedValue
|
||||
if streamNumber == self.streamNumber and receivedTime > int(time.time()) - shared.maximumAgeOfObjectsThatIAdvertiseToOthers:
|
||||
bigInvList[hash] = 0
|
||||
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(payload[
|
||||
:8] + hashlib.sha512(payload[8:]).digest()).digest()).digest()[0:8]) #calculate POW
|
||||
bigInvList[hash] = (2 ** 64)/(POW*len(payload)) #calculate nonceTrialsPerByteActual
|
||||
numberOfObjectsInInvMessage = 0
|
||||
payload = ''
|
||||
# Now let us start appending all of these hashes together. They will be
|
||||
# sent out in a big inv message to our new peer.
|
||||
for hash, storedValue in bigInvList.items():
|
||||
# Now let us start appending all of these hashes together. They will be sent out in a
|
||||
# big inv message to our new peer with the stronger-for-their-length POW objects first
|
||||
for hash in sorted(bigInvList, key=bigInvList.__getitem__, reverse=True):
|
||||
payload += hash
|
||||
numberOfObjectsInInvMessage += 1
|
||||
if numberOfObjectsInInvMessage == 50000: # We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
|
||||
|
|
|
@ -56,6 +56,10 @@ class singleCleaner(threading.Thread):
|
|||
receivedTime,
|
||||
tag)
|
||||
del shared.inventory[hash]
|
||||
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
|
||||
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
|
||||
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
|
||||
shared.config.write(configfile)
|
||||
shared.UISignalQueue.put(('updateStatusBar', ''))
|
||||
shared.broadcastToSendDataQueues((
|
||||
0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
|
||||
|
|
|
@ -130,6 +130,7 @@ class singleWorker(threading.Thread):
|
|||
# Do the POW for this pubkey message
|
||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
self.prioritizeTarget(target, payload)
|
||||
print '(For pubkey message) Doing proof of work...'
|
||||
initialHash = hashlib.sha512(payload).digest()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
|
@ -138,9 +139,7 @@ class singleWorker(threading.Thread):
|
|||
|
||||
inventoryHash = calculateInventoryHash(payload)
|
||||
objectType = 'pubkey'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime,'')
|
||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, '')
|
||||
|
||||
with shared.printLock:
|
||||
print 'broadcasting inv with hash:', inventoryHash.encode('hex')
|
||||
|
@ -216,6 +215,7 @@ class singleWorker(threading.Thread):
|
|||
# Do the POW for this pubkey message
|
||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
self.prioritizeTarget(target, payload)
|
||||
print '(For pubkey message) Doing proof of work...'
|
||||
initialHash = hashlib.sha512(payload).digest()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
|
@ -224,9 +224,7 @@ class singleWorker(threading.Thread):
|
|||
payload = pack('>Q', nonce) + payload
|
||||
inventoryHash = calculateInventoryHash(payload)
|
||||
objectType = 'pubkey'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime,'')
|
||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, '')
|
||||
|
||||
with shared.printLock:
|
||||
print 'broadcasting inv with hash:', inventoryHash.encode('hex')
|
||||
|
@ -312,6 +310,7 @@ class singleWorker(threading.Thread):
|
|||
# Do the POW for this pubkey message
|
||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
self.prioritizeTarget(target, payload)
|
||||
print '(For pubkey message) Doing proof of work...'
|
||||
initialHash = hashlib.sha512(payload).digest()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
|
@ -320,9 +319,7 @@ class singleWorker(threading.Thread):
|
|||
payload = pack('>Q', nonce) + payload
|
||||
inventoryHash = calculateInventoryHash(payload)
|
||||
objectType = 'pubkey'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime, doubleHashOfAddressData[32:])
|
||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, streamNumber, payload, embeddedTime, doubleHashOfAddressData[32:])
|
||||
|
||||
with shared.printLock:
|
||||
print 'broadcasting inv with hash:', inventoryHash.encode('hex')
|
||||
|
@ -422,6 +419,8 @@ class singleWorker(threading.Thread):
|
|||
|
||||
target = 2 ** 64 / ((len(
|
||||
payload) + shared.networkDefaultPayloadLengthExtraBytes + 8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
if ( target > (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)):
|
||||
target = (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)
|
||||
print '(For broadcast message) Doing proof of work...'
|
||||
shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (
|
||||
ackdata, tr.translateText("MainWindow", "Doing work necessary to send broadcast..."))))
|
||||
|
@ -433,9 +432,7 @@ class singleWorker(threading.Thread):
|
|||
|
||||
inventoryHash = calculateInventoryHash(payload)
|
||||
objectType = 'broadcast'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, streamNumber, payload, int(time.time()),tag)
|
||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, streamNumber, payload, int(time.time()), tag)
|
||||
with shared.printLock:
|
||||
print 'sending inv (within sendBroadcast function) for object:', inventoryHash.encode('hex')
|
||||
shared.broadcastToSendDataQueues((
|
||||
|
@ -807,6 +804,8 @@ class singleWorker(threading.Thread):
|
|||
continue
|
||||
encryptedPayload = embeddedTime + encodeVarint(toStreamNumber) + encrypted
|
||||
target = 2**64 / ((len(encryptedPayload)+requiredPayloadLengthExtraBytes+8) * requiredAverageProofOfWorkNonceTrialsPerByte)
|
||||
if ( target > (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)):
|
||||
target = (2 ** 64)/(len(payload)*shared.averageNonceTrialsPerByteActual)
|
||||
with shared.printLock:
|
||||
print '(For msg message) Doing proof of work. Total required difficulty:', float(requiredAverageProofOfWorkNonceTrialsPerByte) / shared.networkDefaultProofOfWorkNonceTrialsPerByte, 'Required small message difficulty:', float(requiredPayloadLengthExtraBytes) / shared.networkDefaultPayloadLengthExtraBytes
|
||||
|
||||
|
@ -824,9 +823,7 @@ class singleWorker(threading.Thread):
|
|||
|
||||
inventoryHash = calculateInventoryHash(encryptedPayload)
|
||||
objectType = 'msg'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, toStreamNumber, encryptedPayload, int(time.time()),'')
|
||||
shared.inventorySets[toStreamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, toStreamNumber, encryptedPayload, int(time.time()), '')
|
||||
if shared.config.has_section(toaddress):
|
||||
shared.UISignalQueue.put(('updateSentItemStatusByAckdata', (ackdata, tr.translateText("MainWindow", "Message sent. Sent on %1").arg(l10n.formatTimestamp()))))
|
||||
else:
|
||||
|
@ -902,6 +899,7 @@ class singleWorker(threading.Thread):
|
|||
ripe, tr.translateText("MainWindow",'Doing work necessary to request encryption key.'))))
|
||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
self.prioritizeTarget(target, payload)
|
||||
initialHash = hashlib.sha512(payload).digest()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
with shared.printLock:
|
||||
|
@ -911,9 +909,7 @@ class singleWorker(threading.Thread):
|
|||
payload = pack('>Q', nonce) + payload
|
||||
inventoryHash = calculateInventoryHash(payload)
|
||||
objectType = 'getpubkey'
|
||||
shared.inventory[inventoryHash] = (
|
||||
objectType, streamNumber, payload, int(time.time()),'')
|
||||
shared.inventorySets[streamNumber].add(inventoryHash)
|
||||
shared.addInventory(inventoryHash, objectType, streamNumber, payload, int(time.time()), '')
|
||||
print 'sending inv (for the getpubkey message)'
|
||||
shared.broadcastToSendDataQueues((
|
||||
streamNumber, 'advertiseobject', inventoryHash))
|
||||
|
@ -932,6 +928,7 @@ class singleWorker(threading.Thread):
|
|||
payload = embeddedTime + encodeVarint(toStreamNumber) + ackdata
|
||||
target = 2 ** 64 / ((len(payload) + shared.networkDefaultPayloadLengthExtraBytes +
|
||||
8) * shared.networkDefaultProofOfWorkNonceTrialsPerByte)
|
||||
self.prioritizeTarget(target, payload)
|
||||
with shared.printLock:
|
||||
print '(For ack message) Doing proof of work...'
|
||||
|
||||
|
@ -947,3 +944,10 @@ class singleWorker(threading.Thread):
|
|||
|
||||
payload = pack('>Q', nonce) + payload
|
||||
return shared.CreatePacket('msg', payload)
|
||||
|
||||
def prioritizeTarget(self, target, payload):
    """Clamp a proof-of-work target so objects we originate never have
    much less than the inventory-average priority.

    For large inventories the floor is the inventory average of
    length-adjusted targets; for small inventories the (1 + 1/count)
    factor relaxes the floor, up to twice the average when only one
    object has been counted.

    Returns the (possibly reduced) target.

    NOTE(review): the original computed the new target but never
    returned it, so callers doing ``self.prioritizeTarget(target,
    payload)`` discarded the result; callers should assign the return
    value.  The division ``1/count`` was also Python 2 integer
    division, which truncated the interpolation factor to 0 for any
    count > 1 -- fixed with ``1.0/count``.
    """
    # Average-strength target for a payload of this length.
    averageTarget = (2 ** 64) / (len(payload) * shared.averageNonceTrialsPerByteActual)
    if target > averageTarget:
        # Interpolate between 2x the average (count == 1) and exactly
        # the average (large count).
        target = (1 + 1.0 / shared.countNonceTrialsPerByteActual) * averageTarget
    return target
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@ from namecoin import ensureNamecoinOptions
|
|||
import random
|
||||
import string
|
||||
import tr#anslate
|
||||
from pyelliptic import arithmetic
|
||||
|
||||
# This thread exists because SQLITE3 is so un-threadsafe that we must
|
||||
# submit queries to it and it puts results back in a different queue. They
|
||||
|
@ -302,6 +303,15 @@ class sqlThread(threading.Thread):
|
|||
item = '''update settings set value=? WHERE key='version';'''
|
||||
parameters = (6,)
|
||||
self.cur.execute(item, parameters)
|
||||
|
||||
if shared.config.has_option('bitmessagesettings', 'averagenoncetrialsperbyteactual') and shared.config.has_option('bitmessagesettings', 'countnoncetrialsperbyteactual'):
|
||||
shared.averageNonceTrialsPerByteActual = shared.config.getint('bitmessagesettings', 'averagenoncetrialsperbyteactual')
|
||||
shared.countNonceTrialsPerByteActual = shared.config.getint('bitmessagesettings', 'countnoncetrialsperbyteactual')
|
||||
else:
|
||||
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
|
||||
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
|
||||
with open(shared.appdata + 'keys.dat', 'wb') as configfile:
|
||||
shared.config.write(configfile)
|
||||
|
||||
# Are you hoping to add a new option to the keys.dat file of existing
|
||||
# Bitmessage users? Add it right above this line!
|
||||
|
@ -345,6 +355,7 @@ class sqlThread(threading.Thread):
|
|||
logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...')
|
||||
try:
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
shared.countNonceTrialsPerByteActual = arithmetic.isqrt(shared.countNonceTrialsPerByteActual)
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
|
@ -418,7 +429,8 @@ class sqlThread(threading.Thread):
|
|||
self.cur.execute('''delete from sent where folder='trash' ''')
|
||||
self.conn.commit()
|
||||
try:
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
shared.countNonceTrialsPerByteActual = arithmetic.isqrt(shared.countNonceTrialsPerByteActual)
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
|
|
|
@ -92,6 +92,8 @@ def loadConfig():
|
|||
shared.networkDefaultProofOfWorkNonceTrialsPerByte * 2))
|
||||
shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(
|
||||
shared.networkDefaultPayloadLengthExtraBytes))
|
||||
shared.config.set('bitmessagesettings', 'averagenoncetrialsperbyteactual', str(shared.averageNonceTrialsPerByteActual))
|
||||
shared.config.set('bitmessagesettings', 'countnoncetrialsperbyteactual', str(shared.countNonceTrialsPerByteActual))
|
||||
shared.config.set('bitmessagesettings', 'minimizeonclose', 'false')
|
||||
shared.config.set(
|
||||
'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')
|
||||
|
|
|
@ -70,6 +70,21 @@ def base10_multiply(a,n):
|
|||
if (n%2) == 0: return base10_double(base10_multiply(a,n/2))
|
||||
if (n%2) == 1: return base10_add(base10_double(base10_multiply(a,n/2)),a)
|
||||
|
||||
def isqrt(x):
    """Return the floor of the square root of a non-negative integer.

    Newton's method on arbitrary-precision ints, so the result stays
    exact for values far beyond float precision.
    """
    if x < 0:
        raise ValueError('square root not defined for negative numbers')
    value = int(x)
    if value == 0:
        return 0
    # Seed with a power of two just above sqrt(value); from there the
    # Newton iteration decreases monotonically to the floor sqrt.
    half, rem = divmod(value.bit_length(), 2)
    guess = 2 ** (half + rem)
    better = (guess + value // guess) // 2
    while better < guess:
        guess = better
        better = (guess + value // guess) // 2
    return guess
|
||||
|
||||
|
||||
def hex_to_point(h): return (decode(h[2:66],16),decode(h[66:],16))
|
||||
|
||||
def point_to_hex(p): return '04'+encode(p[0],16,64)+encode(p[1],16,64)
|
||||
|
|
|
@ -85,6 +85,10 @@ streamsInWhichIAmParticipating = {}
|
|||
networkDefaultProofOfWorkNonceTrialsPerByte = 320 #The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
|
||||
networkDefaultPayloadLengthExtraBytes = 14000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
|
||||
|
||||
#inventory average (and count) of nonce trials per byte of actual payload (excluding extra bytes)
|
||||
averageNonceTrialsPerByteActual = (networkDefaultPayloadLengthExtraBytes * networkDefaultProofOfWorkNonceTrialsPerByte)/50
|
||||
countNonceTrialsPerByteActual = 1
|
||||
|
||||
# Remember here the RPC port read from namecoin.conf so we can restore to
|
||||
# it as default whenever the user changes the "method" selection for
|
||||
# namecoin integration to "namecoind".
|
||||
|
@ -577,9 +581,7 @@ def checkAndShareMsgWithPeers(data):
|
|||
return
|
||||
# This msg message is valid. Let's let our peers know about it.
|
||||
objectType = 'msg'
|
||||
inventory[inventoryHash] = (
|
||||
objectType, streamNumberAsClaimedByMsg, data, embeddedTime,'')
|
||||
inventorySets[streamNumberAsClaimedByMsg].add(inventoryHash)
|
||||
addInventory(inventoryHash, objectType, streamNumberAsClaimedByMsg, data, embeddedTime, '')
|
||||
inventoryLock.release()
|
||||
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
|
||||
broadcastToSendDataQueues((streamNumberAsClaimedByMsg, 'advertiseobject', inventoryHash))
|
||||
|
@ -639,9 +641,7 @@ def checkAndSharegetpubkeyWithPeers(data):
|
|||
return
|
||||
|
||||
objectType = 'getpubkey'
|
||||
inventory[inventoryHash] = (
|
||||
objectType, streamNumber, data, embeddedTime,'')
|
||||
inventorySets[streamNumber].add(inventoryHash)
|
||||
addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, '')
|
||||
inventoryLock.release()
|
||||
# This getpubkey request is valid. Forward to peers.
|
||||
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
|
||||
|
@ -707,9 +707,7 @@ def checkAndSharePubkeyWithPeers(data):
|
|||
inventoryLock.release()
|
||||
return
|
||||
objectType = 'pubkey'
|
||||
inventory[inventoryHash] = (
|
||||
objectType, streamNumber, data, embeddedTime, tag)
|
||||
inventorySets[streamNumber].add(inventoryHash)
|
||||
addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag)
|
||||
inventoryLock.release()
|
||||
# This object is valid. Forward it to peers.
|
||||
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
|
||||
|
@ -776,9 +774,7 @@ def checkAndShareBroadcastWithPeers(data):
|
|||
return
|
||||
# It is valid. Let's let our peers know about it.
|
||||
objectType = 'broadcast'
|
||||
inventory[inventoryHash] = (
|
||||
objectType, streamNumber, data, embeddedTime, tag)
|
||||
inventorySets[streamNumber].add(inventoryHash)
|
||||
addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag)
|
||||
inventoryLock.release()
|
||||
# This object is valid. Forward it to peers.
|
||||
logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
|
||||
|
@ -792,6 +788,18 @@ def checkAndShareBroadcastWithPeers(data):
|
|||
shared.objectProcessorQueueSize += len(data)
|
||||
objectProcessorQueue.put((objectType,data))
|
||||
|
||||
def addInventory(inventoryHash, objectType, streamNumber, data, embeddedTime, tag):
    """Store an object in the in-memory inventory and fold its proof of
    work into the running nonce-trials-per-byte statistics used to
    prioritize outgoing objects."""
    # Record the object in the inventory dict and the per-stream hash set.
    inventory[inventoryHash] = (objectType, streamNumber, data, embeddedTime, tag)
    inventorySets[streamNumber].add(inventoryHash)

    # Measure this object's proof-of-work strength: double SHA-512 over
    # the 8-byte nonce plus the hash of the remainder of the payload.
    innerDigest = hashlib.sha512(data[8:]).digest()
    powDigest = hashlib.sha512(
        hashlib.sha512(data[:8] + innerDigest).digest()).digest()
    POW, = unpack('>Q', powDigest[0:8])
    nonceTrialsPerByteActual = (2 ** 64) / (POW * len(data))

    # Incrementally update the inventory-wide running mean.
    shared.countNonceTrialsPerByteActual += 1
    shared.averageNonceTrialsPerByteActual += (
        nonceTrialsPerByteActual - shared.averageNonceTrialsPerByteActual
    ) / shared.countNonceTrialsPerByteActual
|
||||
|
||||
helper_startup.loadConfig()
|
||||
from debug import logger
|
||||
|
|
Reference in New Issue
Block a user