Dandelion++ implementation
- untested, some functionality may be missing, don't turn on - also, it randomises upload of requested objects - affects #1049
This commit is contained in:
parent
8495836428
commit
6ce86b1d0a
@@ -54,6 +54,7 @@ from bmconfigparser import BMConfigParser
 from inventory import Inventory

 from network.connectionpool import BMConnectionPool
+from network.dandelion import DandelionStems
 from network.networkthread import BMNetworkThread
 from network.receivequeuethread import ReceiveQueueThread
 from network.announcethread import AnnounceThread
@@ -248,6 +249,7 @@ class Main:
         sqlLookup.start()

         Inventory() # init
+        DandelionStems() # init, needs to be early because other thread may access it early

         # SMTP delivery thread
         if daemon and BMConfigParser().safeGet("bitmessagesettings", "smtpdeliver", '') != '':
@@ -20,6 +20,7 @@ BMConfigDefaults = {
     },
     "network": {
         "bind": '',
+        "dandelion": 0,
     },
     "inventory": {
         "storage": "sqlite",
@@ -10,6 +10,7 @@ from helper_sql import *
 from helper_threading import *
 from inventory import Inventory
 from network.connectionpool import BMConnectionPool
+from network.dandelion import DandelionStems
 from debug import logger
 import knownnodes
 import queues
@@ -126,6 +127,10 @@ class singleCleaner(threading.Thread, StoppableThread):
             # inv/object tracking
             for connection in BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values():
                 connection.clean()
+            # dandelion fluff trigger by expiration
+            for h, t in DandelionStems().timeouts:
+                if time.time() > t:
+                    DandelionStems().remove(h)

             # discovery tracking
             exp = time.time() - singleCleaner.expireDiscoveredPeers
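A side note on the expiration sweep above: DandelionStems().timeouts is a dict, so iterating it directly yields only the hash keys, and remove() mutates the dict while it is being walked; also, timeouts stores the time an entry was added, and FLUFF_TRIGGER_TIMEOUT in the new src/network/dandelion.py is defined but never referenced. A minimal sketch of what the sweep is presumably meant to do, under those assumptions (expire_stems is an illustrative name, not from the commit):

    import time

    from network.dandelion import DandelionStems, FLUFF_TRIGGER_TIMEOUT

    def expire_stems():
        # walk a snapshot of (hashId, added-at) pairs so that remove() does
        # not mutate the dict while it is being iterated
        now = time.time()
        for h, t in list(DandelionStems().timeouts.items()):
            # timeouts records when the stem entry was added, so presumably
            # "older than FLUFF_TRIGGER_TIMEOUT seconds" is the intended test
            if now > t + FLUFF_TRIGGER_TIMEOUT:
                DandelionStems().remove(h)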
@@ -4,6 +4,7 @@ import time
 from addresses import calculateInventoryHash
 from debug import logger
 from inventory import Inventory
+from network.dandelion import DandelionStems
 import protocol
 import state

@@ -66,6 +67,9 @@ class BMObject(object):
             raise BMObjectUnwantedStreamError()

     def checkAlreadyHave(self):
+        # if it's a stem duplicate, pretend we don't have it
+        if self.inventoryHash in DandelionStems().stem:
+            return
         if self.inventoryHash in Inventory():
             raise BMObjectAlreadyHaveError()

@@ -1,6 +1,7 @@
 import base64
 import hashlib
 import time
+import random
 import socket
 import struct

@@ -9,6 +10,7 @@ from debug import logger
 from inventory import Inventory
 import knownnodes
 from network.advanceddispatcher import AdvancedDispatcher
+from network.dandelion import DandelionStems, REASSIGN_INTERVAL
 from network.bmobject import BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError, \
     BMObjectExpiredError, BMObjectUnwantedStreamError, BMObjectInvalidError, BMObjectAlreadyHaveError
 import network.connectionpool
@@ -61,6 +63,8 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
         self.payloadOffset = 0
         self.expectBytes = protocol.Header.size
         self.object = None
+        self.dandelionRoutes = []
+        self.dandelionRefresh = 0

     def state_bm_header(self):
         self.magic, self.command, self.payloadLength, self.checksum = protocol.Header.unpack(self.read_buf[:protocol.Header.size])
@@ -266,13 +270,23 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
         # skip?
         if time.time() < self.skipUntil:
             return True
-        #TODO make this more asynchronous and allow reordering
+        #TODO make this more asynchronous
+        random.shuffle(items)
         for i in items:
-            try:
-                self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
-            except KeyError:
+            if i in DandelionStems().stem and \
+                    self not in DandelionStems().stem[i]:
                 self.antiIntersectionDelay()
-                logger.info('%s asked for an object we don\'t have.', self.destination)
+                logger.info('%s asked for a stem object we didn\'t offer to it.', self.destination)
+                break
+            else:
+                try:
+                    self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
+                except KeyError:
+                    self.antiIntersectionDelay()
+                    logger.info('%s asked for an object we don\'t have.', self.destination)
+                    break
+        # I think that aborting after the first missing/stem object is more secure
+        # when using random reordering, as the recipient won't know exactly which objects we refuse to deliver
         return True

     def bm_command_inv(self):
@@ -289,6 +303,34 @@ class BMProto(AdvancedDispatcher, ObjectTracker):

         return True

+    def bm_command_dinv(self):
+        """
+        Dandelion stem announce
+        """
+        items = self.decode_payload_content("l32s")
+
+        if len(items) >= BMProto.maxObjectCount:
+            logger.error("Too many items in dinv message!")
+            raise BMProtoExcessiveDataError()
+        else:
+            pass
+
+        # ignore command if dandelion turned off
+        if BMConfigParser().safeGetBoolean("network", "dandelion") == 0:
+            return True
+
+        if self.dandelionRefresh < time.time():
+            self.dandelionRoutes = network.connectionpool.dandelionRouteSelector(self)
+            self.dandelionRefresh = time.time() + REASSIGN_INTERVAL
+
+        for i in items:
+            # Fluff trigger by RNG, per item
+            if random.randint(1, 100) < BMConfigParser().safeGetBoolean("network", "dandelion"):
+                DandelionStem().add(i, self.dandelionRoutes)
+            self.handleReceivedInventory(i)
+
+        return True
+
     def bm_command_object(self):
         objectOffset = self.payloadOffset
         nonce, expiresTime, objectType, version, streamNumber = self.decode_payload_content("QQIvv")
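Two details in the hunk above look like slips, consistent with the "untested, some functionality may be missing" note: DandelionStem() drops the trailing s of the DandelionStems singleton defined in src/network/dandelion.py below, and the random draw is compared against safeGetBoolean even though the same commit reads the option with safeGetInt in the version-message code further down, suggesting "dandelion" is meant to be a numeric value. A minimal sketch of the per-item loop with those two points adjusted; the helper name is illustrative and the intent is only assumed:

    import random

    from bmconfigparser import BMConfigParser
    from network.dandelion import DandelionStems

    def handle_dinv_items(connection, items):
        # read "dandelion" as a number; 0 keeps the feature off
        dandelion = BMConfigParser().safeGetInt("network", "dandelion")
        for i in items:
            # Fluff trigger by RNG, per item (comment kept from the hunk):
            # when the draw comes in under the configured value, remember
            # the stem routes chosen for this hash
            if random.randint(1, 100) < dandelion:
                DandelionStems().add(i, connection.dandelionRoutes)
            # either way, track the announced hash like a normal inv item
            connection.handleReceivedInventory(i)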
@@ -51,6 +51,22 @@ class BMConnectionPool(object):
         except KeyError:
             pass

+    def dandelionRouteSelector(node):
+        # Choose 2 peers randomly
+        # TODO: handle streams
+        peers = []
+        connections = BMConnectionPool().inboundConnections.values() + \
+            BMConnectionPool().outboundConnections.values()
+        random.shuffle(connections)
+        for i in connections:
+            if i == node:
+                continue
+            if i.services | protocol.NODE_DANDELION:
+                peers.append(i)
+            if len(peers) == 2:
+                break
+        return peers
+
     def connectToStream(self, streamNumber):
         self.streams.append(streamNumber)
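One observation on the selector above: i.services | protocol.NODE_DANDELION is a bitwise OR, which is non-zero for any peer, so every connection other than the announcing node passes the test; a bitwise AND is presumably the intended flag check. Also, bm_command_dinv calls network.connectionpool.dandelionRouteSelector(self) as a module-level function, while the hunk adds it inside BMConnectionPool taking node instead of self, so one of the two presumably needs adjusting. A minimal sketch of the module-level reading, with the AND test, under those assumptions:

    import random

    import protocol
    from network.connectionpool import BMConnectionPool

    def dandelion_route_selector(node):
        # pick up to 2 random peers, other than the announcing node, that
        # actually advertise the NODE_DANDELION service bit
        peers = []
        connections = BMConnectionPool().inboundConnections.values() + \
            BMConnectionPool().outboundConnections.values()
        random.shuffle(connections)
        for i in connections:
            if i == node:
                continue
            if i.services & protocol.NODE_DANDELION:  # AND: flag really set
                peers.append(i)
            if len(peers) == 2:
                break
        return peers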
src/network/dandelion.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+import random
+from threading import RLock
+
+import protocol
+from singleton import Singleton
+
+# randomise routes after 600 seconds
+REASSIGN_INTERVAL = 600
+FLUFF_TRIGGER_TIMEOUT = 300
+
+@Singleton
+class DandelionStems():
+    def __init__(self):
+        self.stem = {}
+        self.timeouts = {}
+        self.lock = RLock()
+
+    def add(self, hashId, stems):
+        with self.lock:
+            self.stem[hashId] = stems
+            self.timeouts[hashId] = time.time()
+
+    def remove(self, hashId):
+        with self.lock:
+            try:
+                del self.stem[hashId]
+                del self.timeouts[hashId]
+            except KeyError:
+                pass
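add() above calls time.time(), but the new module only imports random, RLock, protocol and Singleton, so an import time line appears to be missing; random and protocol are imported but not used yet, again matching the commit message. A short usage sketch of the singleton as defined above, with hypothetical placeholder values:

    from network.dandelion import DandelionStems  # assumes "import time" is added to the module

    h = '\x00' * 32        # hypothetical 32-byte inventory hash
    stem_peers = []        # hypothetical connections picked by the route selector

    # remember that h is in stem phase and should only be offered to stem_peers
    DandelionStems().add(h, stem_peers)

    # a fluff trigger (expiration or cycle detection) later drops the mapping,
    # after which h is advertised and served like any other inventory item
    DandelionStems().remove(h)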
@@ -4,6 +4,7 @@ import threading
 import addresses
 from helper_threading import StoppableThread
 from network.connectionpool import BMConnectionPool
+from network.dandelion import DandelionStems
 from queues import invQueue
 import protocol
 import state
@@ -39,6 +40,8 @@ class InvThread(threading.Thread, StoppableThread):
                 for inv in chunk:
                     if inv[0] not in connection.streams:
                         continue
+                    if inv in DandelionStems().stem and connection not in DandelionStems().stem[inv]:
+                        continue
                     try:
                         with connection.objectsNewToThemLock:
                             del connection.objectsNewToThem[inv[1]]
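In the hunk above, inv is a (stream, hashId) tuple (inv[0] is checked against connection.streams and inv[1] indexes objectsNewToThem), while DandelionStems().stem is keyed by bare hashes everywhere else in this commit, so the membership test presumably wants inv[1]. A minimal sketch of the check under that reading (the helper name is illustrative):

    from network.dandelion import DandelionStems

    def should_skip_inv(connection, inv):
        # inv is a (stream, hashId) tuple; stem entries are keyed by hashId
        hash_id = inv[1]
        stems = DandelionStems().stem
        # suppress normal inv advertising for stem objects, except towards
        # the connections chosen as stem routes for that hash
        return hash_id in stems and connection not in stems[hash_id]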
@@ -4,6 +4,7 @@ from threading import RLock

 from debug import logger
 from inventory import Inventory
+from network.dandelion import DandelionStems

 haveBloom = False

@@ -83,6 +84,11 @@ class ObjectTracker(object):
         if hashId not in Inventory():
             with self.objectsNewToMeLock:
                 self.objectsNewToMe[hashId] = True
+        elif hashId in DandelionStems().stem:
+            # Fluff trigger by cycle detection
+            DandelionStems().remove(hashId)
+            with self.objectsNewToMeLock:
+                self.objectsNewToMe[hashId] = True

     def hasAddr(self, addr):
         if haveBloom:
@@ -18,6 +18,7 @@ from network.advanceddispatcher import AdvancedDispatcher
 from network.bmproto import BMProtoError, BMProtoInsufficientDataError, BMProtoExcessiveDataError, BMProto
 from network.bmobject import BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError, BMObjectExpiredError, BMObjectUnwantedStreamError, BMObjectInvalidError, BMObjectAlreadyHaveError
 import network.connectionpool
+from network.dandelion import DandelionStems
 from network.node import Node
 import network.asyncore_pollchoose as asyncore
 from network.proxy import Proxy, ProxyError, GeneralProxyError
@@ -88,7 +89,7 @@ class TCPConnection(BMProto, TLSDispatcher):
         if self.skipUntil > time.time():
             logger.debug("Initial skipping processing getdata for %.2fs", self.skipUntil - time.time())
         else:
-            logger.debug("Skipping processing getdata due to missing object for %.2fs", self.skipUntil - time.time())
+            logger.debug("Skipping processing getdata due to missing object for %.2fs", delay)
             self.skipUntil = time.time() + delay

     def state_connection_fully_established(self):
@@ -165,6 +166,9 @@ class TCPConnection(BMProto, TLSDispatcher):
         # may lock for a long time, but I think it's better than thousands of small locks
         with self.objectsNewToThemLock:
             for objHash in Inventory().unexpired_hashes_by_stream(stream):
+                # don't advertise stem objects on bigInv
+                if objHash in DandelionStems().stem:
+                    continue
                 bigInvList[objHash] = 0
                 self.objectsNewToThem[objHash] = time.time()
         objectCount = 0
@@ -23,6 +23,7 @@ from version import softwareVersion
 #Service flags
 NODE_NETWORK = 1
 NODE_SSL = 2
+NODE_DANDELION = 8

 #Bitfield flags
 BITFIELD_DOESACK = 1
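A small worked example of the service bitfield: with NODE_NETWORK = 1, NODE_SSL = 2 and NODE_DANDELION = 8, a node offering all three advertises 1 | 2 | 8 = 11, and the receiving side tests the flag with a bitwise AND. A minimal standalone sketch (the helper name is illustrative, not part of the commit):

    NODE_NETWORK = 1
    NODE_SSL = 2
    NODE_DANDELION = 8

    def supports_dandelion(services):
        # services is the 64-bit bitfield exchanged in the version message
        return bool(services & NODE_DANDELION)

    assert NODE_NETWORK | NODE_SSL | NODE_DANDELION == 11
    assert supports_dandelion(11)
    assert not supports_dandelion(NODE_NETWORK | NODE_SSL)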
@@ -191,7 +192,12 @@ def CreatePacket(command, payload=''):
 def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server = False, nodeid = None):
     payload = ''
     payload += pack('>L', 3) # protocol version.
-    payload += pack('>q', NODE_NETWORK|(NODE_SSL if haveSSL(server) else 0)) # bitflags of the services I offer.
+    # bitflags of the services I offer.
+    payload += pack('>q',
+        NODE_NETWORK |
+        (NODE_SSL if haveSSL(server) else 0) |
+        (NODE_DANDELION if BMConfigParser().safeGetInt('network', 'dandelion') > 0 else 0)
+    )
     payload += pack('>q', int(time.time()))

     payload += pack(
@@ -203,7 +209,12 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server
     payload += encodeHost(remoteHost)
     payload += pack('>H', remotePort) # remote IPv6 and port

-    payload += pack('>q', NODE_NETWORK|(NODE_SSL if haveSSL(server) else 0)) # bitflags of the services I offer.
+    # bitflags of the services I offer.
+    payload += pack('>q',
+        NODE_NETWORK |
+        (NODE_SSL if haveSSL(server) else 0) |
+        (NODE_DANDELION if BMConfigParser().safeGetInt('network', 'dandelion') > 0 else 0)
+    )
     payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack(
         '>L', 2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
     # we have a separate extPort and