Put uploads into a separate thread #1415

Merged
PeterSurda merged 3 commits from writebuf_upload_fix into v0.6 2018-12-21 06:33:57 +01:00
5 changed files with 84 additions and 20 deletions

View File

@ -58,6 +58,7 @@ from network.announcethread import AnnounceThread
from network.invthread import InvThread
from network.addrthread import AddrThread
from network.downloadthread import DownloadThread
from network.uploadthread import UploadThread
# Helper Functions
import helper_generic
@ -333,6 +334,9 @@ class Main:
state.downloadThread = DownloadThread()
state.downloadThread.daemon = True
state.downloadThread.start()
state.uploadThread = UploadThread()
state.uploadThread.daemon = True
state.uploadThread.start()
connectToStream(1) connectToStream(1)

View File

@ -18,13 +18,14 @@ from network.node import Node
from network.objectracker import ObjectTracker
from network.proxy import ProxyError
from objectracker import missingObjects
from randomtrackingdict import RandomTrackingDict
import addresses
from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue
import shared
import state
import protocol
import helper_random
class BMProtoError(ProxyError):
errorCodes = ("Protocol error")
@ -57,6 +58,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
self.isOutbound = False
# packet/connection from a local IP
self.local = False
self.pendingUpload = RandomTrackingDict()
def bm_proto_reset(self):
self.magic = None
@ -277,25 +279,11 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
def bm_command_getdata(self):
items = self.decode_payload_content("l32s")
# skip?
if time.time() < self.skipUntil: now = time.time()
if now < self.skipUntil:
return True
#TODO make this more asynchronous for i in items:
helper_random.randomshuffle(items) self.pendingUpload[str(i)] = now
for i in map(str, items):
if Dandelion().hasHash(i) and \
self != Dandelion().objectChildStem(i):
self.antiIntersectionDelay()
logger.info('%s asked for a stem object we didn\'t offer to it.', self.destination)
break
else:
try:
self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
except KeyError:
self.antiIntersectionDelay()
logger.info('%s asked for an object we don\'t have.', self.destination)
break
# I think that aborting after the first missing/stem object is more secure
# when using random reordering, as the recipient won't know exactly which objects we refuse to deliver
return True
def _command_inv(self, dandelion=False):

View File

@ -0,0 +1,71 @@
"""
src/network/uploadthread.py
"""
# pylint: disable=unsubscriptable-object
import threading
import time
import helper_random
import protocol
from debug import logger
from helper_threading import StoppableThread
from inventory import Inventory
from network.connectionpool import BMConnectionPool
from network.dandelion import Dandelion
from randomtrackingdict import RandomTrackingDict
class UploadThread(threading.Thread, StoppableThread):
    """Thread that delivers requested objects to peers.

    Connections collect the inventory hashes their peer asked for in a
    per-connection ``pendingUpload`` dict; this thread drains those queues,
    packs the matching inventory objects into ``object`` packets and appends
    the result to the connection's write buffer.
    """
    # Skip a peer while its outgoing write buffer exceeds this size (2MB).
    maxBufSize = 2097152

    def __init__(self):
        threading.Thread.__init__(self, name="Uploader")
        self.initStop()
        self.name = "Uploader"
        logger.info("init upload thread")

    def run(self):
        while not self._stopped:
            uploaded = 0
            # Snapshot every fully-established connection and visit them in
            # random order so no single peer is consistently served first.
            peers = [
                conn
                for conn in BMConnectionPool().inboundConnections.values() +
                BMConnectionPool().outboundConnections.values()
                if conn.fullyEstablished
            ]
            helper_random.randomshuffle(peers)
            for conn in peers:
                now = time.time()
                # avoid unnecessary delay
                if conn.skipUntil >= now:
                    continue
                # Back off until the peer's write buffer drains a bit.
                if len(conn.write_buf) > UploadThread.maxBufSize:
                    continue
                try:
                    # randomKeys raises KeyError when nothing is pending.
                    keys = conn.pendingUpload.randomKeys(
                        RandomTrackingDict.maxPending)
                except KeyError:
                    continue
                packed = bytearray()
                count = 0
                for key in keys:
                    del conn.pendingUpload[key]
                    # Refuse stem objects we never offered to this peer
                    # (possible intersection attack probe).
                    if Dandelion().hasHash(key) and \
                            conn != Dandelion().objectChildStem(key):
                        conn.antiIntersectionDelay()
                        logger.info(
                            '%s asked for a stem object we didn\'t offer to it.',
                            conn.destination)
                        break
                    try:
                        packed.extend(protocol.CreatePacket(
                            'object', Inventory()[key].payload))
                        count += 1
                    except KeyError:
                        conn.antiIntersectionDelay()
                        logger.info(
                            '%s asked for an object we don\'t have.',
                            conn.destination)
                        break
                if not count:
                    continue
                # Everything packed so far goes out in one buffer append.
                conn.append_write_buf(packed)
                logger.debug("%s:%i Uploading %i objects",
                             conn.destination.host, conn.destination.port,
                             count)
                uploaded += count
            if not uploaded:
                # Idle: sleep up to a second, waking early on stop().
                self.stop.wait(1)

View File

@ -34,6 +34,7 @@ maximumNumberOfHalfOpenConnections = 0
invThread = None
addrThread = None
downloadThread = None
uploadThread = None
ownAddresses = {}

View File

@ -23,7 +23,7 @@ class TestProcessProto(unittest.TestCase):
it starts pybitmessage in setUpClass() and stops it in tearDownClass()
"""
_process_cmd = ['pybitmessage', '-d']
_threads_count = 14 _threads_count = 15
_files = (
'keys.dat', 'debug.log', 'messages.dat', 'knownnodes.dat',
'.api_started', 'unittest.lock'