logger.info('inv message lists %s objects. Of those %s are new to me. It took %s seconds to figure that out.', numberOfItemsInInv, len(objectsNewToMe), time.time() - startTime)
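# Hedged sketch (illustrative only, not the code above): objectsNewToMe is just
# the set of advertised inventory hashes minus the hashes we already track for
# this stream in shared.inventorySets. The helper name and arguments below are
# hypothetical.
def findObjectsNewToMe(advertisedHashes, knownHashesForStream):
    # advertisedHashes: iterable of 32-byte hashes taken from the inv payload
    # knownHashesForStream: e.g. shared.inventorySets[streamNumber]
    return set(advertisedHashes) - knownHashesForStream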
'ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
break  # giving up on unpacking any more. We should still be connected however.
break  # giving up on unpacking any more. We should still be connected however.
if recaddrStream == 0:
    continue
if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1):  # if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
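# Hedged sketch of the check above: streams form a binary tree in which stream n
# has children 2n and 2n+1, so an addr entry is only accepted for our own stream
# or one of our two child streams. The helper below is illustrative only.
def isAcceptableStream(recaddrStream, myStreamNumber):
    return recaddrStream in (myStreamNumber,
                             myStreamNumber * 2,
                             (myStreamNumber * 2) + 1)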
'ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
break  # giving up on unpacking any more. We should still be connected however.
if recaddrStream == 0:
    continue
if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1):  # if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
34 * i):4 + lengthOfNumberOfAddresses + (34 * i)])  # This is the 'time' value in the received addr message.
if recaddrStream not in shared.knownNodes:  # knownNodes is a dictionary of dictionaries: the outer dictionary is keyed by stream number and each inner dictionary holds the nodes for that stream. If the dictionary for this stream doesn't exist yet then we must make it.
if len(shared.knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time()) - 10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800):  # If we already have 20000 nodes in our list then just forget about adding more. Also, make sure that the time someone else received a message from this node is within three hours of now.
peerFromAddrMessage]  # PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
38 * i):8 + lengthOfNumberOfAddresses + (38 * i)])  # This is the 'time' value in the received addr message. 64-bit.
if recaddrStream not in shared.knownNodes:  # knownNodes is a dictionary of dictionaries: the outer dictionary is keyed by stream number and each inner dictionary holds the nodes for that stream. If the dictionary for this stream doesn't exist yet then we must make it.
shared.knownNodesLock.acquire()
output = open(shared.appdata + 'knownnodes.dat', 'wb')
pickle.dump(shared.knownNodes, output)
output.close()
shared.knownNodes[recaddrStream] = {}
shared.knownNodesLock.release()
self.broadcastaddr(
listOfAddressDetailsToBroadcastToPeers)  # no longer broadcast
with shared.printLock:
    print 'knownNodes currently has', len(shared.knownNodes[self.streamNumber]), 'nodes for this stream.'
elif self.remoteProtocolVersion >= 2:  # The difference is that in protocol version 2, network addresses use 64-bit times rather than 32-bit times.
'ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
break  # giving up on unpacking any more. We should still be connected however.
if recaddrStream == 0:
    continue
if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1):  # if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
if len(shared.knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time()) - 10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800):  # If we already have 20000 nodes in our list then just forget about adding more. Also, make sure that the time someone else received a message from this node is within three hours of now.
38 * i):8 + lengthOfNumberOfAddresses + (38 * i)])  # This is the 'time' value in the received addr message. 64-bit.
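# Hedged sketch of the protocol difference handled above: version 1 addr entries
# are 34 bytes with a 4-byte time, version 2+ entries are 38 bytes with an
# 8-byte time, and the time is the first field of each entry. Assuming network
# byte order (as elsewhere in the protocol), the two slices would be unpacked
# roughly like this; 'entry' is a hypothetical name for one address record.
import struct

def unpackAddrTime(entry, remoteProtocolVersion):
    if remoteProtocolVersion >= 2:
        return struct.unpack('>Q', entry[:8])[0]  # 64-bit time
    return struct.unpack('>I', entry[:4])[0]      # 32-bit time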
if recaddrStream not in shared.knownNodes:  # knownNodes is a dictionary of dictionaries: the outer dictionary is keyed by stream number and each inner dictionary holds the nodes for that stream. If the dictionary for this stream doesn't exist yet then we must make it.
print 'added new node', peerFromAddrMessage, 'to knownNodes in stream', recaddrStream
peerFromAddrMessage]  # PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
if len(shared.knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time()) - 10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800):  # If we already have 20000 nodes in our list then just forget about adding more. Also, make sure that the time someone else received a message from this node is within three hours of now.
peerFromAddrMessage]  # PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
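# Hedged sketch of the acceptance test used above: cap the per-stream list at
# 20000 nodes and only accept advertised timestamps within three hours (10800
# seconds) of our own clock, which keeps stale or bogus addr data out of
# knownNodes. Function and parameter names are illustrative only.
import time

def shouldAddNode(knownNodesForStream, advertisedTime, maxNodes=20000, maxSkew=10800):
    now = int(time.time())
    return (len(knownNodesForStream) < maxNodes
            and (now - maxSkew) < advertisedTime < (now + maxSkew))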
if needToWriteKnownNodesToDisk:  # Runs if any nodes were new to us. Also, share those nodes with our peers.
    shared.knownNodesLock.acquire()
    output = open(shared.appdata + 'knownnodes.dat', 'wb')
    try:
        pickle.dump(shared.knownNodes, output)
        output.close()
    except Exception as err:
        if "Errno 28" in str(err):
            logger.fatal('(while receiveDataThread needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full.')
            shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
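# Hedged aside: "Errno 28" is ENOSPC ("No space left on device"). The string
# match above is what the code does; an equivalent check written against the
# errno module might look like this (illustrative helper, not project code).
import errno

def isDiskFull(err):
    return getattr(err, 'errno', None) == errno.ENOSPC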
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
0, 'pong', 'no data'))  # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout is 10 minutes.
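# Hedged sketch of the keepalive idea described above, not the sendData thread's
# actual code: if nothing has been sent for five minutes, emit a pong so the
# remote end's ten-minute socket timeout never fires on an idle but healthy
# connection. 'sendPong' is a hypothetical callback.
import time

def maybeSendPong(lastTimeWeSentData, sendPong):
    if time.time() - lastTimeWeSentData > 300:  # five minutes of silence
        sendPong()
        return True
    return False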
@@ -109,4 +115,33 @@ class singleCleaner(threading.Thread):
shared.workerQueue.put(('sendmessage', ''))
shared.UISignalQueue.put((
    'updateStatusBar', 'Doing work necessary to again attempt to deliver a message...'))
# Let's also clear and reload shared.inventorySets to keep it from
# taking up an unnecessary amount of memory.
for streamNumber in shared.inventorySets:
    shared.inventorySets[streamNumber] = set()
    queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)
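    # Presumably the set is then refilled from the query result; a hedged sketch
    # of that step, assuming sqlQuery returns rows whose first column is the hash:
    for row in queryData:
        shared.inventorySets[streamNumber].add(row[0])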
# Let us write out the knownNodes to disk if there is anything new to write out.
if shared.needToWriteKnownNodesToDisk:
    shared.knownNodesLock.acquire()
    output = open(shared.appdata + 'knownnodes.dat', 'wb')
    try:
        pickle.dump(shared.knownNodes, output)
        output.close()
    except Exception as err:
        if "Errno 28" in str(err):
            logger.fatal('(while singleCleaner shared.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full.')
            shared.UISignalQueue.put(('alert', (tr.translateText("MainWindow", "Disk full"), tr.translateText("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
inventorySets = {}  # key = streamNumber, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check which items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple of hours.
needToWriteKnownNodesToDisk = False  # If True, the singleCleaner thread will eventually write knownNodes to disk.
# If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
networkDefaultProofOfWorkNonceTrialsPerByte = 320  # The amount of work that should be performed (and demanded) per byte of the payload. Double this number to double the work.
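# Hedged illustration of why doubling the constant above doubles the work: in
# this era of the protocol the proof-of-work target was (roughly) inversely
# proportional to nonceTrialsPerByte, so doubling it halves the target and
# therefore doubles the expected number of hash attempts. The exact formula and
# the 14000-byte payloadLengthExtraBytes default are assumptions for this sketch.
def approximateProofOfWorkTarget(payloadLength, nonceTrialsPerByte=320, payloadLengthExtraBytes=14000):
    return 2 ** 64 // ((payloadLength + payloadLengthExtraBytes + 8) * nonceTrialsPerByte)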
# Note that the singleCleaner thread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary now.