import time
from threading import RLock

from inventory import Inventory
import network.connectionpool
from network.dandelion import Dandelion
from randomtrackingdict import RandomTrackingDict
from state import missingObjects

haveBloom = False

try:
    # pybloomfiltermmap
    from pybloomfilter import BloomFilter
    haveBloom = True
except ImportError:
    try:
        # pybloom
        from pybloom import BloomFilter
        haveBloom = True
    except ImportError:
        pass

# it isn't actually implemented yet so no point in turning it on
haveBloom = False


class ObjectTracker(object):
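    """
    Per-connection object tracker, intended to be mixed into connection
    objects. It tracks which inventory objects the remote peer has that we
    still need (objectsNewToMe) and which objects the peer presumably lacks
    (objectsNewToThem), with optional Bloom filter support that is currently
    disabled. The class attributes below are tuning constants for the cleanup
    interval, tracking expiry and Bloom filter sizing.
    """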
    invCleanPeriod = 300
    invInitialCapacity = 50000
    invErrorRate = 0.03
    trackingExpires = 3600
    initialTimeOffset = 60

    def __init__(self):
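        # Per-connection tracking state: objectsNewToMe holds hashes the peer
        # has advertised but we haven't downloaded yet; objectsNewToThem maps
        # hashes the peer presumably lacks to the time we learned about them.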
        self.objectsNewToMe = RandomTrackingDict()
        self.objectsNewToThem = {}
        self.objectsNewToThemLock = RLock()
        self.initInvBloom()
        self.initAddrBloom()
        self.lastCleaned = time.time()

    def initInvBloom(self):
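        """Initialize the inventory Bloom filter (no-op unless a Bloom filter library is available)."""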
        if haveBloom:
            # lock?
            self.invBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
                                        error_rate=ObjectTracker.invErrorRate)

    def initAddrBloom(self):
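        """Initialize the address Bloom filter (no-op unless a Bloom filter library is available)."""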
        if haveBloom:
            # lock?
            self.addrBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
                                         error_rate=ObjectTracker.invErrorRate)

    def clean(self):
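        """
        Periodic cleanup, run at most once every invCleanPeriod seconds: with
        Bloom filters enabled the filters are reinitialised, otherwise entries
        older than trackingExpires are dropped from objectsNewToThem to free
        memory.
        """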
        if self.lastCleaned < time.time() - ObjectTracker.invCleanPeriod:
            if haveBloom:
                # FIXME: PendingDownloadQueue is not imported in this module;
                # this branch is dead while haveBloom is forced to False above
                if PendingDownloadQueue().size() == 0:
                    self.initInvBloom()
                self.initAddrBloom()
            else:
                # release memory
                deadline = time.time() - ObjectTracker.trackingExpires
                with self.objectsNewToThemLock:
                    self.objectsNewToThem = {k: v for k, v in self.objectsNewToThem.iteritems() if v >= deadline}
            self.lastCleaned = time.time()

    def hasObj(self, hashid):
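        """Check whether hashid is in the inventory Bloom filter or, with Bloom filters disabled, in objectsNewToMe."""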
        if haveBloom:
            return hashid in self.invBloom
        else:
            return hashid in self.objectsNewToMe

    def handleReceivedInventory(self, hashId):
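        """
        Handle an inv for hashId received on this connection: the peer
        evidently has the object, so stop treating it as new to them, and
        mark it as missing/new to us so that it gets downloaded.
        """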
        if haveBloom:
            self.invBloom.add(hashId)
        try:
            with self.objectsNewToThemLock:
                del self.objectsNewToThem[hashId]
        except KeyError:
            pass
        if hashId not in missingObjects:
            missingObjects[hashId] = time.time()
        self.objectsNewToMe[hashId] = True

    def handleReceivedObject(self, streamNumber, hashid):
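        """
        Handle a fully received object: update tracking on every fully
        established connection, schedule the object for advertisement to
        peers that presumably lack it (respecting Dandelion stem routing),
        and update the Dandelion stream information for the object.
        """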
        for i in network.connectionpool.BMConnectionPool().inboundConnections.values() + network.connectionpool.BMConnectionPool().outboundConnections.values():
            if not i.fullyEstablished:
                continue
            try:
                del i.objectsNewToMe[hashid]
            except KeyError:
                if streamNumber in i.streams and \
                        (not Dandelion().hasHash(hashid) or
                         Dandelion().objectChildStem(hashid) == i):
                    with i.objectsNewToThemLock:
                        i.objectsNewToThem[hashid] = time.time()
                    # update stream number, which we didn't have when we just received the dinv
                    # also resets expiration of the stem mode
                    Dandelion().setHashStream(hashid, streamNumber)

            if i == self:
                try:
                    with i.objectsNewToThemLock:
                        del i.objectsNewToThem[hashid]
                except KeyError:
                    pass

    def hasAddr(self, addr):
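        """Check whether addr is in the address Bloom filter (only meaningful when Bloom filters are enabled)."""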
        if haveBloom:
            return addr in self.addrBloom

    def addAddr(self, hashid):
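        """Add hashid to the address Bloom filter (no-op unless Bloom filters are enabled)."""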
        if haveBloom:
            self.addrBloom.add(hashid)


# addr sending -> per node upload queue, and flush every minute or so
# inv sending -> if not in bloom, inv immediately, otherwise put into a per node upload queue and flush every minute or so
# data sending -> a simple queue

# no bloom
# - if inv arrives
#   - if we don't have it, add tracking and download queue
#   - if we do have it, remove from tracking
# tracking downloads
# - per node hash of items the node has but we don't
# tracking inv
# - per node hash of items that neither the remote node nor we have
#
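

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of the no-bloom inv flow described in the notes
# above, runnable only inside a PyBitmessage environment where this module's
# imports resolve. The 32-byte hash below is a made-up placeholder.
if __name__ == '__main__':
    tracker = ObjectTracker()
    fakeHash = '\x00' * 32
    # The peer advertises an object we don't have yet: it becomes "new to me"
    # and is recorded in state.missingObjects for the download logic.
    tracker.handleReceivedInventory(fakeHash)
    print tracker.hasObj(fakeHash)    # True: tracked as new to us
    print fakeHash in missingObjects  # True: queued for download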