2017-06-21 12:16:33 +02:00
|
|
|
import threading
|
2017-07-05 09:25:49 +02:00
|
|
|
import time
|
2017-06-21 12:16:33 +02:00
|
|
|
|
|
|
|
import addresses
|
2018-12-10 13:33:07 +01:00
|
|
|
import helper_random
|
|
|
|
import protocol
|
2018-02-03 11:46:39 +01:00
|
|
|
from dandelion import Dandelion
|
2017-06-21 12:16:33 +02:00
|
|
|
from debug import logger
|
|
|
|
from helper_threading import StoppableThread
|
2018-02-01 22:58:04 +01:00
|
|
|
from inventory import Inventory
|
2017-06-21 12:16:33 +02:00
|
|
|
from network.connectionpool import BMConnectionPool
|
2018-12-10 13:33:07 +01:00
|
|
|
from objectracker import missingObjects
|
|
|
|
|
2017-06-21 12:16:33 +02:00
|
|
|
|
|
|
|
class DownloadThread(threading.Thread, StoppableThread):
    """
    Thread that asks connected peers for objects we know about but do not
    yet have (sends ``getdata`` packets), and expires stale requests.
    """
    # NOTE(review): appears unused within this class — presumably a
    # threshold for pending downloads consulted elsewhere; confirm callers.
    minPending = 200
    # Upper bound on the number of object hashes in a single getdata packet.
    maxRequestChunk = 1000
    # Seconds to wait for a peer to answer a request (not enforced here).
    requestTimeout = 60
    # Seconds between runs of cleanPending().
    cleanInterval = 60
    # Pending requests older than this many seconds are forgotten so the
    # objects can be re-requested from another peer.
    requestExpires = 3600

    def __init__(self):
        # Thread.__init__ already records the name; the former duplicate
        # "self.name = ..." assignment was redundant and has been removed.
        threading.Thread.__init__(self, name="Downloader")
        self.initStop()
        logger.info("init download thread")
        # Timestamp of the last cleanPending() run.
        self.lastCleaned = time.time()

    def cleanPending(self):
        """Drop tracked requests older than ``requestExpires`` seconds."""
        deadline = time.time() - DownloadThread.requestExpires
        try:
            # missingObjects is shared with other threads; if it is resized
            # while we iterate, CPython raises RuntimeError — just retry on
            # the next cleanInterval tick.
            toDelete = [
                k for k, v in missingObjects.iteritems() if v < deadline]
        except RuntimeError:
            pass
        else:
            for i in toDelete:
                try:
                    del missingObjects[i]
                except KeyError:
                    # Removed concurrently (e.g. the object arrived) between
                    # taking the snapshot above and deleting here.
                    pass
            self.lastCleaned = time.time()

    def run(self):
        """Main loop: round-robin getdata requests until stopped."""
        while not self._stopped:
            requested = 0
            # Choose downloading peers randomly
            connections = [
                x for x in
                BMConnectionPool().inboundConnections.values() +
                BMConnectionPool().outboundConnections.values()
                if x.fullyEstablished]
            helper_random.randomshuffle(connections)
            try:
                # Spread the per-cycle request budget evenly over peers,
                # asking each peer for at least one object.
                requestChunk = max(int(
                    min(DownloadThread.maxRequestChunk, len(missingObjects))
                    / len(connections)), 1)
            except ZeroDivisionError:
                # No established connections this cycle.
                requestChunk = 1
            for i in connections:
                now = time.time()
                # avoid unnecessary delay
                if i.skipUntil >= now:
                    continue
                try:
                    request = i.objectsNewToMe.randomKeys(requestChunk)
                except KeyError:
                    # Peer has nothing new for us.
                    continue
                payload = bytearray()
                chunkCount = 0
                for chunk in request:
                    # Skip objects we already hold — unless the hash is in
                    # the dandelion stem phase, in which case keep it
                    # requestable.
                    if chunk in Inventory() and not Dandelion().hasHash(chunk):
                        try:
                            del i.objectsNewToMe[chunk]
                        except KeyError:
                            pass
                        continue
                    payload.extend(chunk)
                    chunkCount += 1
                    # Remember when we asked, for cleanPending() expiry.
                    missingObjects[chunk] = now
                if not chunkCount:
                    continue
                # Prepend the varint object count to complete the payload.
                payload[0:0] = addresses.encodeVarint(chunkCount)
                i.append_write_buf(protocol.CreatePacket('getdata', payload))
                logger.debug(
                    "%s:%i Requesting %i objects",
                    i.destination.host, i.destination.port, chunkCount)
                requested += chunkCount
            if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
                self.cleanPending()
            if not requested:
                # Nothing to do — sleep briefly (interruptible by stop()).
                self.stop.wait(1)
|