Solved sending message issues
This commit is contained in:
parent eb2b948576
commit ec618d9d1a
@@ -141,7 +141,7 @@ class objectProcessor(threading.Thread):
 if bytes(data[readPosition:]) in shared.ackdataForWhichImWatching:
 logger.info('This object is an acknowledgement bound for me.')
-del shared.ackdataForWhichImWatching[data[readPosition:]]
+del shared.ackdataForWhichImWatching[bytes(data[readPosition:])]
 sqlExecute(
 'UPDATE sent SET status=?, lastactiontime=?'
 ' WHERE ackdata=?',
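The first hunk above wraps the dictionary key in bytes(). A minimal sketch of the Python 3 behaviour behind that change (the dict name mirrors the one in the diff; the stored value and buffer are made up):

# Sketch: why the ackdata key is normalised with bytes() in Python 3.
ackdataForWhichImWatching = {b'\x01\x02\x03': 0}   # hypothetical watch list

data = bytearray(b'\x00\x01\x02\x03')              # e.g. a received buffer
readPosition = 1

try:
    del ackdataForWhichImWatching[data[readPosition:]]        # bytearray slice
except TypeError as err:
    print('raw slice fails:', err)                 # unhashable type: 'bytearray'

del ackdataForWhichImWatching[bytes(data[readPosition:])]     # works
print(ackdataForWhichImWatching)                   # {}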
@@ -234,7 +234,6 @@ class objectProcessor(threading.Thread):
 logger.debug(
 'the tag requested in this getpubkey request is: %s',
 hexlify(requestedTag))
-# import pdb;pdb.set_trace()
 if bytes(requestedTag) in shared.myAddressesByTag:
 myAddress = shared.myAddressesByTag[bytes(requestedTag)]

@@ -441,15 +440,12 @@ class objectProcessor(threading.Thread):
 return

 # Let us try to decrypt the pubkey
-print("TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT#################################################", tag)
 toAddress, _ = state.neededPubkeys[bytes(tag)] #check with py2
-# import pdb;pdb.set_trace()
 if protocol.decryptAndCheckPubkeyPayload(bytes(data), toAddress) == \
 'successful':
 # At this point we know that we have been waiting on this
 # pubkey. This function will command the workerThread
 # to start work on the messages that require it.
-print("decryptAndCheckPubkeyPayload completed#########################################################")
 self.possibleNewPubkey(toAddress)

 # Display timing data
@@ -224,6 +224,7 @@ class singleWorker(StoppableThread):
 if log_time:
 start_time = time.time()
 trialValue, nonce = proofofwork.run(target, initialHash)
+print("nonce calculated value#############################", nonce)
 self.logger.info(
 '%s Found proof of work %s Nonce: %s',
 log_prefix, trialValue, nonce
@@ -679,7 +680,6 @@ class singleWorker(StoppableThread):
 """Send a message-type object (assemble the object, perform PoW and put it to the inv announcement queue)"""
 # pylint: disable=too-many-nested-blocks
 # Reset just in case
-# import pdb;pdb.set_trace()
 sqlExecute(
 '''UPDATE sent SET status='msgqueued' '''
 ''' WHERE status IN ('doingpubkeypow', 'doingmsgpow')''')
@@ -690,7 +690,6 @@ class singleWorker(StoppableThread):
 ''' and folder LIKE '%sent%' ''')
 # while we have a msg that needs some work
 for row in queryreturn:
-print(row, "rowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowrowv")
 toaddress, fromaddress, subject, message, \
 ackdata, status, TTL, retryNumber, encoding = row
 # toStatus
@@ -732,7 +731,6 @@ class singleWorker(StoppableThread):
 toaddress
 )
 # If we have the needed pubkey in the pubkey table already,
-print("sendmsg line no 734#####################################################################")
 if queryreturn != []:
 # set the status of this msg to doingmsgpow
 sqlExecute(
@@ -753,7 +751,6 @@ class singleWorker(StoppableThread):
 )
 # We don't have the needed pubkey in the pubkeys table already.
 else:
-print("sendmsg line no 756#####################################################################")
 if toAddressVersionNumber <= 3:
 toTag = ''
 else:
@@ -763,16 +760,14 @@ class singleWorker(StoppableThread):
 ).digest()).digest()[32:]
 if toaddress in state.neededPubkeys or \
 toTag in state.neededPubkeys:
-print("sendmsg line no 766#####################################################################")
 # We already sent a request for the pubkey
 sqlExecute(
 '''UPDATE sent SET status='awaitingpubkey', '''
 ''' sleeptill=? WHERE toaddress=? '''
 ''' AND status='msgqueued' ''',
-int(10),
+int(time.time()) + 2.5 * 24 * 60 * 60,
 toaddress
 )
-print("sendmsg line no 774#####################################################################")
 queues.UISignalQueue.put((
 'updateSentItemStatusByToAddress', (
 toaddress,
@@ -780,11 +775,9 @@ class singleWorker(StoppableThread):
 "MainWindow",
 "Encryption key was requested earlier."))
 ))
-print("sendmsg line no 783#####################################################################")
 # on with the next msg on which we can do some work
 continue
 else:
-print("sendmsg line no 785#####################################################################")
 # We have not yet sent a request for the pubkey
 needToRequestPubkey = True
 # If we are trying to send to address
@@ -812,7 +805,6 @@ class singleWorker(StoppableThread):
 highlevelcrypto.makeCryptor(
 hexlify(privEncryptionKey))
 )
-
 for value in Inventory().by_type_and_tag(1, toTag):
 # if valid, this function also puts it
 # in the pubkeys table.
@@ -858,7 +850,6 @@ class singleWorker(StoppableThread):
 self.requestPubKey(toaddress)
 # on with the next msg on which we can do some work
 continue
-print("sendmsg line no 856#####################################################################")
 # At this point we know that we have the necessary pubkey
 # in the pubkeys table.
 TTL *= 2**retryNumber
@@ -867,10 +858,8 @@ class singleWorker(StoppableThread):
 # add some randomness to the TTL
 TTL = int(TTL + helper_random.randomrandrange(-300, 300))
 embeddedTime = int(time.time() + TTL)
-print("sendmsg line no 870#####################################################################")
 # if we aren't sending this to ourselves or a chan
 if not BMConfigParser().has_section(toaddress):
-print("sendmsg line no 873#####################################################################")
 shared.ackdataForWhichImWatching[ackdata] = 0
 queues.UISignalQueue.put((
 'updateSentItemStatusByAckdata', (
@@ -878,22 +867,23 @@ class singleWorker(StoppableThread):
 tr._translate(
 "MainWindow",
 "Looking up the receiver\'s public key"))
 ))
 self.logger.info('Sending a message.')
-self.logger.debug(
-'First 150 characters of message: %s',
-repr(message[:150])
-)
+# self.logger.debug(
+# 'First 150 characters of message: %s',
+# repr(message[:150])
+# )
+
 # Let us fetch the recipient's public key out of
 # our database. If the required proof of work difficulty
 # is too hard then we'll abort.

 queryreturn = sqlQuery(
 'SELECT transmitdata FROM pubkeys WHERE address=?',
 toaddress)
 for row in queryreturn: # pylint: disable=redefined-outer-name
 pubkeyPayload, = row

 # The pubkey message is stored with the following items
 # all appended:
 # -address version
@@ -917,7 +907,7 @@ class singleWorker(StoppableThread):

 # if receiver is a mobile device who expects that their
 # address RIPE is included unencrypted on the front of
 # the message..
 if protocol.isBitSetWithinBitfield(behaviorBitfield, 30):
 # if we are Not willing to include the receiver's
 # RIPE hash on the message..
@@ -954,9 +944,8 @@ class singleWorker(StoppableThread):
 pubEncryptionKeyBase256 = pubkeyPayload[
 readPosition:readPosition + 64]
 readPosition += 64
-
 # Let us fetch the amount of work required by the recipient.
 if toAddressVersionNumber == 2:
 requiredAverageProofOfWorkNonceTrialsPerByte = \
 defaults.networkDefaultProofOfWorkNonceTrialsPerByte
 requiredPayloadLengthExtraBytes = \
@@ -970,6 +959,7 @@ class singleWorker(StoppableThread):
 "There is no required difficulty for"
 " version 2 addresses like this."))
 ))
+
 elif toAddressVersionNumber >= 3:
 requiredAverageProofOfWorkNonceTrialsPerByte, \
 varintLength = decodeVarint(
@@ -996,7 +986,6 @@ class singleWorker(StoppableThread):
 requiredAverageProofOfWorkNonceTrialsPerByte,
 requiredPayloadLengthExtraBytes
 )
-
 queues.UISignalQueue.put(
 (
 'updateSentItemStatusByAckdata',
@@ -1021,17 +1010,15 @@ class singleWorker(StoppableThread):
 )
 )
 )
-
 if status != 'forcepow':
-maxacceptablenoncetrialsperbyte = BMConfigParser().getint(
-'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')
-maxacceptablepayloadlengthextrabytes = BMConfigParser().getint(
-'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')
+maxacceptablenoncetrialsperbyte = int(BMConfigParser().get(
+'bitmessagesettings', 'maxacceptablenoncetrialsperbyte'))
+maxacceptablepayloadlengthextrabytes = int(BMConfigParser().get(
+'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes'))
 cond1 = maxacceptablenoncetrialsperbyte and \
 requiredAverageProofOfWorkNonceTrialsPerByte > maxacceptablenoncetrialsperbyte
 cond2 = maxacceptablepayloadlengthextrabytes and \
 requiredPayloadLengthExtraBytes > maxacceptablepayloadlengthextrabytes
-
 if cond1 or cond2:
 # The demanded difficulty is more than
 # we are willing to do.
@@ -1055,12 +1042,10 @@ class singleWorker(StoppableThread):
 l10n.formatTimestamp()))))
 continue
 else: # if we are sending a message to ourselves or a chan..
-print("sendmsg line no 1058#####################################################################")
 self.logger.info('Sending a message.')
 self.logger.debug(
 'First 150 characters of message: %r', message[:150])
 behaviorBitfield = protocol.getBitfield(fromaddress)
-
 try:
 privEncryptionKeyBase58 = BMConfigParser().get(
 toaddress, 'privencryptionkey')
@@ -1097,7 +1082,6 @@ class singleWorker(StoppableThread):
 "MainWindow",
 "Doing work necessary to send message."))
 ))
-print("sendmsg line no 1093#####################################################################")
 # Now we can start to assemble our message.
 payload = encodeVarint(fromAddressVersionNumber)
 payload += encodeVarint(fromStreamNumber)
@@ -1105,10 +1089,8 @@ class singleWorker(StoppableThread):
 # that can be expected from me. (See
 # https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features)
 payload += protocol.getBitfield(fromaddress)
-print("sendmsg line no 1101#####################################################################")
 # We need to convert our private keys to public keys in order
 # to include them.
-# import pdb; pdb.set_trace()
 try:
 privSigningKeyHex, privEncryptionKeyHex, \
 pubSigningKey, pubEncryptionKey = self._getKeysForAddress(
@@ -1123,9 +1105,7 @@ class singleWorker(StoppableThread):
 " (your address) in the keys.dat file."))
 ))
 continue
-print("sendmsg line no 1119#####################################################################")
 payload += pubSigningKey + pubEncryptionKey
-
 if fromAddressVersionNumber >= 3:
 # If the receiver of our message is in our address book,
 # subscriptions list, or whitelist then we will allow them to
@@ -1142,13 +1122,11 @@ class singleWorker(StoppableThread):
 fromaddress, 'noncetrialsperbyte')))
 payload += encodeVarint(int(BMConfigParser().get(
 fromaddress, 'payloadlengthextrabytes')))
-print('@@@@@@@@@@@@@@ before payload creating@@@@@@@@@@@@@@@@')
 # This hash will be checked by the receiver of the message
 # to verify that toRipe belongs to them. This prevents
 # a Surreptitious Forwarding Attack.
 payload += toRipe
 payload += encodeVarint(encoding) # message encoding type
-# import pdb;pdb.set_trace()
 encodedMessage = helper_msgcoding.MsgEncode(
 {"subject": subject, "body": message}, encoding
 )
|
@ -1174,7 +1152,7 @@ class singleWorker(StoppableThread):
|
||||||
fullAckPayload = self.generateFullAckMessage(
|
fullAckPayload = self.generateFullAckMessage(
|
||||||
ackdata, toStreamNumber, TTL)
|
ackdata, toStreamNumber, TTL)
|
||||||
payload += encodeVarint(len(fullAckPayload))
|
payload += encodeVarint(len(fullAckPayload))
|
||||||
payload += fullAckPayload.encode()
|
payload += fullAckPayload if isinstance(fullAckPayload,bytes) else fullAckPayload.encode()
|
||||||
dataToSign = pack('>Q', embeddedTime) + '\x00\x00\x00\x02'.encode() + \
|
dataToSign = pack('>Q', embeddedTime) + '\x00\x00\x00\x02'.encode() + \
|
||||||
encodeVarint(1) + encodeVarint(toStreamNumber) + payload
|
encodeVarint(1) + encodeVarint(toStreamNumber) + payload
|
||||||
signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
|
signature = highlevelcrypto.sign(dataToSign, privSigningKeyHex)
|
||||||
|
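The fullAckPayload line above guards the concatenation with isinstance. A hedged sketch of that str-or-bytes normalisation pattern (the helper name and inputs are illustrative, not the project's API):

# Sketch: normalise a value that may be str or bytes before appending it
# to a bytes payload (a common concern in a Python 2 -> 3 port).
def as_bytes(value, encoding='utf-8'):
    """Return value unchanged if it is already bytes, otherwise encode it."""
    return value if isinstance(value, bytes) else value.encode(encoding)

payload = b''
for fullAckPayload in (b'\x00\x01', 'ack-as-text'):   # mixed inputs
    payload += as_bytes(fullAckPayload)
print(payload)   # b'\x00\x01ack-as-text'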
@@ -1201,7 +1179,6 @@ class singleWorker(StoppableThread):
 ).arg(l10n.formatTimestamp()))
 ))
 continue
-print('@@@@@@@@@@@@@@ before encryptedPayload creating@@@@@@@@@@@@@@@@')
 encryptedPayload = pack('>Q', embeddedTime)
 encryptedPayload += '\x00\x00\x00\x02'.encode() # object type: msg
 encryptedPayload += encodeVarint(1) # msg version
@@ -1226,10 +1203,7 @@ class singleWorker(StoppableThread):

 powStartTime = time.time()
 initialHash = hashlib.sha512(encryptedPayload).digest()
-# import pdb; pdb.set_trace()
-
 trialValue, nonce = proofofwork.run(target, initialHash)
-print("nonce calculated value#############################", nonce)
 self.logger.info(
 '(For msg message) Found proof of work %s Nonce: %s',
 trialValue, nonce
@@ -1242,8 +1216,6 @@ class singleWorker(StoppableThread):
 )
 except:
 pass
-print("line no 1234#########################################")
-# import pdb; pdb.set_trace()
 encryptedPayload = pack('>Q', nonce) + encryptedPayload

 # Sanity check. The encryptedPayload size should never be
@@ -1257,18 +1229,12 @@ class singleWorker(StoppableThread):
 len(encryptedPayload)
 )
 continue
-print("line no 1248#########################################")
 inventoryHash = calculateInventoryHash(encryptedPayload)
-print("line no 1250248#########################################")
 objectType = 2
-print("line no 1252#########################################")
-# import pdb; pdb.set_trace()
 inventoryHashlist = (
 objectType, toStreamNumber,encryptedPayload, embeddedTime, '')
-print("line no 1255#########################################")
-# import pdb; pdb.set_trace()
-Inventory()[inventoryHashlist]
-print("line no 1257#########################################")
+Inventory()._realInventory[inventoryHash] = (
+objectType, toStreamNumber, encryptedPayload, embeddedTime, '')
 if BMConfigParser().has_section(toaddress) or \
 not protocol.checkBitfield(behaviorBitfield, protocol.BITFIELD_DOESACK):
 queues.UISignalQueue.put((
@@ -1289,7 +1255,6 @@ class singleWorker(StoppableThread):
 " Sent on %1"
 ).arg(l10n.formatTimestamp()))
 ))
-print("line no 1282#########################################")
 self.logger.info(
 'Broadcasting inv for my msg(within sendmsg function): %s',
 hexlify(inventoryHash)
@@ -1297,7 +1262,6 @@ class singleWorker(StoppableThread):
 queues.invQueue.put((toStreamNumber, inventoryHash))
 # Update the sent message in the sent table with the
 # necessary information.
-print("line no 1290#########################################")
 if BMConfigParser().has_section(toaddress) or \
 not protocol.checkBitfield(behaviorBitfield, protocol.BITFIELD_DOESACK):
 newStatus = 'msgsentnoackexpected'
@@ -1313,7 +1277,6 @@ class singleWorker(StoppableThread):
 )
 # If we are sending to ourselves or a chan, let's put
 # the message in our own inbox.
-print("line no 1306#########################################")
 if BMConfigParser().has_section(toaddress):
 # Used to detect and ignore duplicate messages in our inbox
 sigHash = hashlib.sha512(hashlib.sha512(
@@ -1340,7 +1303,6 @@ class singleWorker(StoppableThread):

 def requestPubKey(self, toAddress):
 """Send a getpubkey object"""
-# import pdb;pdb.set_trace()
 toStatus, addressVersionNumber, streamNumber, ripe = decodeAddress(
 toAddress)
 if toStatus != 'success':
@@ -1350,7 +1312,6 @@ class singleWorker(StoppableThread):
 toAddress
 )
 return
-
 queryReturn = sqlQuery(
 '''SELECT retrynumber FROM sent WHERE toaddress=? '''
 ''' AND (status='doingpubkeypow' OR status='awaitingpubkey') '''
@@ -1365,10 +1326,9 @@ class singleWorker(StoppableThread):
 )
 return
 retryNumber = queryReturn[0][0]
-
 if addressVersionNumber <= 3:
 state.neededPubkeys[toAddress] = 0
 elif addressVersionNumber >= 4:
 # If the user just clicked 'send' then the tag
 # (and other information) will already be in the
 # neededPubkeys dictionary. But if we are recovering
@@ -1424,17 +1384,14 @@ class singleWorker(StoppableThread):
 "MainWindow",
 "Doing work necessary to request encryption key."))
 ))
-
 payload = self._doPOWDefaults(payload, TTL)
-
 inventoryHash = calculateInventoryHash(payload)
 objectType = 1
-inventoryHashlist = (
+Inventory()._realInventory[inventoryHash] = (
 objectType, streamNumber, payload, embeddedTime, '')
-Inventory()[inventoryHashlist]
+# Inventory()._realInventory[inventoryHashlist]
 self.logger.info('sending inv (for the getpubkey message)')
 queues.invQueue.put((streamNumber, inventoryHash))
-
 # wait 10% past expiration
 sleeptill = int(time.time() + TTL * 1.1)
 sqlExecute(
@@ -1443,7 +1400,6 @@ class singleWorker(StoppableThread):
 ''' WHERE toaddress=? AND (status='doingpubkeypow' OR '''
 ''' status='awaitingpubkey') ''',
 int(time.time()), retryNumber + 1, sleeptill, toAddress)
-
 queues.UISignalQueue.put((
 'updateStatusBar',
 tr._translate(
@@ -1461,7 +1417,7 @@ class singleWorker(StoppableThread):
 ).arg(l10n.formatTimestamp()))
 ))

 def generateFullAckMessage(self, ackdata, _, TTL):
 """
 It might be perfectly fine to just use the same TTL for the ackdata that we use for the message. But I would
 rather it be more difficult for attackers to associate ackData with the associated msg object. However, users
@@ -574,7 +574,6 @@ class sqlThread(threading.Thread):
 # print 'item', item
 # print 'parameters', parameters
 # if 'inbox' in item:
-# import pdb; pdb.set_trace()
 try:
 self.cur.execute(item, parameters)
 rowcount = self.cur.rowcount
@@ -24,8 +24,6 @@ class Inventory():

 # cheap inheritance copied from asyncore
 def __getattr__(self, attr):
-# attr = '__contains__'
-print('$$$$$$$$$$$$$ inside the __getattr__ item $$$$$$$$$$$$$$$$')
 if attr == "__contains__":
 self.numberOfInventoryLookupsPerformed += 1
 try:
@@ -40,5 +38,4 @@ class Inventory():

 # hint for pylint: this is dictionary like object
 def __getitem__(self, key):
-print('@@@@@@@@@@@@@@@@@@ inside the __getitem__ item @@@@@@@@@@@@@@@')
 return self._realInventory[key]
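The two Inventory hunks above show a thin wrapper that keeps the real store in _realInventory and forwards everything else to it. A minimal sketch of that delegation pattern, assuming the backend is simply dict-like (the class name and sample values here are illustrative, not the project's code):

# Sketch of the delegation pattern: item access and unknown attributes are
# served by the object stored in _realInventory.
class InventoryWrapper():
    def __init__(self, backend):
        self._realInventory = backend            # e.g. a dict or an SQLite-backed store

    # cheap inheritance: forward unknown attributes to the backend
    def __getattr__(self, attr):
        return getattr(self._realInventory, attr)

    # hint for pylint: this is a dictionary-like object
    def __getitem__(self, key):
        return self._realInventory[key]

inv = InventoryWrapper({})
inv._realInventory[b'hash'] = (2, 1, b'payload', 0, b'')   # write to the real store
print(inv[b'hash'])              # read through the wrapper
print(list(inv.keys()))          # attribute call forwarded to the backend dict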
@@ -116,7 +116,7 @@ class BMObject(object): # pylint: disable=too-many-instance-attributes
 # if it's a stem duplicate, pretend we don't have it
 if Dandelion().hasHash(self.inventoryHash):
 return
-if self.inventoryHash in Inventory():
+if self.inventoryHash in Inventory()._realInventory:
 raise BMObjectAlreadyHaveError()

 def checkObjectByType(self):
@@ -34,20 +34,6 @@ from network.node import Node, Peer
 from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue
 from network.randomtrackingdict import RandomTrackingDict

-global addr_count
-addr_count = 0
-
-global addr_verack
-addr_verack = 0
-
-global addr_version
-addr_version = 0
-
-# global addr_count
-# addr_count = 0
-
-count = 0
-
 logger = logging.getLogger('default')


@@ -100,14 +86,6 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
 # its shoule be in string
 self.command = self.command.rstrip('\x00'.encode('utf-8'))
 # pylint: disable=global-statement
-global count, addr_version, addr_count, addr_verack
-count += 1
-if self.command == 'verack'.encode():
-addr_verack += 1
-if self.command == 'version'.encode():
-addr_version += 1
-if self.command == 'addr'.encode():
-addr_count += 1
 if self.magic != 0xE9BEB4D9:
 self.set_state("bm_header", length=1)
 self.bm_proto_reset()
@@ -377,11 +355,9 @@ class BMProto(AdvancedDispatcher, ObjectTracker):

 # ignore dinv if dandelion turned off
 if dandelion and not state.dandelion:
 return True

 for i in map(bytes, items):
-import pdb;pdb.set_trace()
-if i in Inventory() and not Dandelion().hasHash(i):
+if i in Inventory()._realInventory and not Dandelion().hasHash(i):
 continue
 if dandelion and not Dandelion().hasHash(i):
 Dandelion().addHash(i, self)
@@ -442,28 +418,15 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
 except KeyError:
 pass

-if self.object.inventoryHash in Inventory() and Dandelion().hasHash(self.object.inventoryHash):
+if self.object.inventoryHash in Inventory()._realInventory and Dandelion().hasHash(self.object.inventoryHash):
 Dandelion().removeHash(self.object.inventoryHash, "cycle detection")
-# import pdb; pdb.set_trace()
-inventoryHash_list = [self.object.objectType, self.object.streamNumber,
-memoryview(self.payload[objectOffset:]), self.object.expiresTime,
-memoryview(self.object.tag)]
-# [self.object.inventoryHash] = (
-# self.object.objectType, self.object.streamNumber,
-# memoryview(self.payload[objectOffset:]), self.object.expiresTime,
-# memoryview(self.object.tag)
-# )
-# Inventory()[self.object.inventoryHash] = (self.object.objectType, self.object.streamNumber,
-# buffer(self.payload[objectOffset:]), self.object.expiresTime,
-# buffer(self.object.tag))
+[self.object.inventoryHash] = (
+self.object.objectType, self.object.streamNumber,
+memoryview(self.payload[objectOffset:]), self.object.expiresTime,
+memoryview(self.object.tag)
+)
+Inventory()[self.object.inventoryHash]

 self.handleReceivedObject(
 self.object.streamNumber, self.object.inventoryHash)
 invQueue.put((
@@ -738,4 +701,4 @@ class BMStringParser(BMProto):
 except Exception as e:
 logger.debug(
 'Exception of type %s while sending ACK',
 type(e), exc_info=True)
@@ -61,7 +61,7 @@ class DownloadThread(StoppableThread):
 payload = bytearray()
 chunkCount = 0
 for chunk in request:
-if chunk in Inventory() and not Dandelion().hasHash(chunk):
+if chunk in Inventory()._realInventory and not Dandelion().hasHash(chunk):
 try:
 del i.objectsNewToMe[chunk]
 except KeyError:
@@ -88,7 +88,6 @@ class InvThread(StoppableThread):
 except KeyError:
 fluffs.append(inv[1])
 if fluffs:
-# import pdb; pdb.set_trace()
 random.shuffle(fluffs)
 connection.append_write_buf(protocol.CreatePacket(
 'inv',
@@ -211,8 +211,7 @@ class TCPConnection(BMProto, TLSDispatcher):
 # may lock for a long time, but I think it's better than
 # thousands of small locks
 with self.objectsNewToThemLock:
-# import pdb;pdb.set_trace()
-for objHash in Inventory().unexpired_hashes_by_stream(stream):
+for objHash in Inventory()._realInventory.unexpired_hashes_by_stream(stream):
 # don't advertise stem objects on bigInv
 if Dandelion().hasHash(objHash):
 continue
@@ -221,18 +220,18 @@ class TCPConnection(BMProto, TLSDispatcher):
 payload = bytes()
 # Now let us start appending all of these hashes together. They will be
 # sent out in a big inv message to our new peer.
-if len(bigInvList) is not 0:
-for obj_hash, _ in bigInvList.items():
-payload += obj_hash
-objectCount += 1
+for obj_hash, _ in bigInvList.items():
+payload += obj_hash
+objectCount += 1

-# Remove -1 below when sufficient time has passed for users to
-# upgrade to versions of PyBitmessage that accept inv with 50,000
-# items
-if objectCount >= MAX_OBJECT_COUNT - 1:
-sendChunk()
-payload = b''
-objectCount = 0
+# Remove -1 below when sufficient time has passed for users to
+# upgrade to versions of PyBitmessage that accept inv with 50,000
+# items
+if objectCount >= MAX_OBJECT_COUNT - 1:
+sendChunk()
+payload = b''
+objectCount = 0

 # flush
 sendChunk()
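The re-indented block above accumulates 32-byte inventory hashes and flushes a chunk whenever the count approaches the per-message limit. A hedged, self-contained sketch of the same chunking idea (MAX_OBJECT_COUNT and sendChunk are stand-ins here, not the module's actual objects):

# Sketch: append fixed-size hashes to one payload and flush a chunk whenever
# the object count approaches the limit, then flush the remainder.
MAX_OBJECT_COUNT = 50000   # stand-in for the protocol constant

def send_big_inv(bigInvList, sendChunk):
    payload = b''
    objectCount = 0
    for obj_hash in bigInvList:
        payload += obj_hash
        objectCount += 1
        # keep each inv message below the per-message object limit
        if objectCount >= MAX_OBJECT_COUNT - 1:
            sendChunk(objectCount, payload)
            payload = b''
            objectCount = 0
    # flush whatever is left
    if objectCount:
        sendChunk(objectCount, payload)

send_big_inv([bytes(32)] * 3, lambda n, p: print(n, len(p)))   # prints: 3 96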
@@ -429,4 +428,4 @@ class TCPServer(AdvancedDispatcher):
 connectionpool.BMConnectionPool().addConnection(
 TCPConnection(sock=sock))
 except socket.error:
 pass
@@ -112,7 +112,6 @@ def _doFastPoW(target, initialHash):


 def _doCPoW(target, initialHash):
-# import pdb; pdb.set_trace()
 out_h = ctypes.pointer(ctypes.create_string_buffer(initialHash, 64))
 out_m = ctypes.c_ulonglong(target)
 logger.debug("C PoW start")
@@ -253,7 +252,6 @@ def run(target, initialHash):
 pass # fallback
 if bmpow:
 try:
-print('-------------inside the proofofwork-----------------')
 return _doCPoW(target, initialHash)
 except StopIteration:
 raise
@@ -294,8 +292,6 @@ def init():

 openclpow.initCL()
 if sys.platform == "win32":
-print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
-print('inside the sys.platform == "win32"')
 if ctypes.sizeof(ctypes.c_voidp) == 4:
 bitmsglib = 'bitmsghash32.dll'
 else:
@@ -328,9 +324,6 @@ def init():
 bso = None

 else:
-# import pdb; pdb.set_trace()
-print('####################################')
-print('else else else eles else ')
 try:
 bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
 except OSError:
@@ -354,6 +347,4 @@ def init():
 else:
 bmpow = None
 if bmpow is None:
-print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
-print('intailed the bmpow')
 buildCPoW()
@@ -13,7 +13,6 @@ from storage.storage import InventoryStorage, InventoryItem
 class SqliteInventory(InventoryStorage): # pylint: disable=too-many-ancestors
 """Inventory using SQLite"""
 def __init__(self):
-# import pdb;pdb.set_trace()
 super(SqliteInventory, self).__init__()
 # of objects (like msg payloads and pubkey payloads)
 # Does not include protocol headers (the first 24 bytes of each packet).
@@ -30,8 +29,6 @@ class SqliteInventory(InventoryStorage): # pylint: disable=too-many-ancestors
 self.lock = RLock()

 def __contains__(self, hash_):
-print('__contains__(self, hash_)__contains__(self, hash_)__contains__(self, hash_) ',hash_)
-hash_ = str(hash_).encode() if type(hash_) == int else hash_
 with self.lock:
 if hash_ in self._objects:
 return True
@@ -42,59 +39,34 @@ class SqliteInventory(InventoryStorage): # pylint: disable=too-many-ancestors
 return False
 self._objects[hash_] = rows[0][0]
 return True
-
-# def __getitem__(self, hash_):
-# raw = [None]
-# # some think broke
-# if hash_ == 0:
-# hash_ = bytes()
-# with self.lock:
-# try:
-# if hash_ in self._inventory:
-# return self._inventory[hash_]
-# rows = sqlQuery(
-# 'SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?',
-# sqlite3.Binary(hash_))
-# if not rows:
-# # raise KeyError(hash_)
-# pass
-# except:
-# rows = [hash_]
-# return InventoryItem(*rows[0])

 def __getitem__(self, hash_):
-# import pdb;pdb.set_trace()
 with self.lock:
 if hash_ in self._inventory:
 return self._inventory[hash_]
 rows = sqlQuery(
 'SELECT objecttype, streamnumber, payload, expirestime, tag'
-' FROM inventory WHERE hash=?', sqlite3.Binary(bytes(hash_)))
+' FROM inventory WHERE hash=?', sqlite3.Binary(hash_))
 if not rows:
 raise KeyError(hash_)
 return InventoryItem(*rows[0])

-
 def __setitem__(self, hash_, value):
-print('----------__setitem__------------------')
 with self.lock:
 value = InventoryItem(*value)
 self._inventory[hash_] = value
 self._objects[hash_] = value.stream

 def __delitem__(self, hash_):
-print('----------__delitem__------------------')
 raise NotImplementedError

 def __iter__(self):
-print('----------__iter__------------------')
 with self.lock:
 hashes = self._inventory.keys()[:]
 hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
 return hashes.__iter__()

 def __len__(self):
-print('----------__len__------------------')
 with self.lock:
 return len(self._inventory) + sqlQuery(
 'SELECT count(*) FROM inventory')[0][0]
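The __getitem__ hunk above touches the sqlite3.Binary() wrapping of the hash parameter. A small sketch of the requirement behind it: in Python 3 sqlite3.Binary is memoryview, so the value handed to it must already be a bytes-like object (the table and column names here are illustrative):

# Sketch: binding a BLOB parameter; sqlite3.Binary (memoryview in Python 3)
# needs a bytes-like value.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE inventory (hash BLOB, payload BLOB)')

hash_ = b'\xaa' * 32
conn.execute('INSERT INTO inventory VALUES (?, ?)',
             (sqlite3.Binary(hash_), b'data'))

rows = conn.execute('SELECT payload FROM inventory WHERE hash=?',
                    (sqlite3.Binary(hash_),)).fetchall()
print(rows)   # [(b'data',)]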
@@ -112,17 +84,14 @@ class SqliteInventory(InventoryStorage): # pylint: disable=too-many-ancestors

 def unexpired_hashes_by_stream(self, stream):
 """Return unexpired inventory vectors filtered by stream"""
-# print ('self._inventory.items() self._inventory.items() self._inventory.items()' ,self._inventory.items())
-# import pdb;pdb.set_trace()
 with self.lock:
 t = int(time.time())
 hashes = [x for x, value in self._inventory.items()
 if value.stream == stream and value.expires > t]
-# print ('hasheshasheshasheshasheshasheshasheshasheshashes',hashes)
 hashes += (payload for payload, in sqlQuery(
 'SELECT hash FROM inventory WHERE streamnumber=?'
 ' AND expirestime>?', stream, t))
-# print ('hasheshasheshasheshasheshasheshasheshasheshashes aaaaaaaaffter',hashes)
+# print('sqlllllllllllllllllllllllllllllllllll',hashes)
 return hashes

 def flush(self):