2017-07-06 19:45:36 +02:00
|
|
|
import random
|
2017-06-21 12:16:33 +02:00
|
|
|
import threading
|
2017-07-05 09:25:49 +02:00
|
|
|
import time
|
2017-06-21 12:16:33 +02:00
|
|
|
|
|
|
|
import addresses
|
|
|
|
#from bmconfigparser import BMConfigParser
|
|
|
|
from debug import logger
|
|
|
|
from helper_threading import StoppableThread
|
|
|
|
#from inventory import Inventory
|
|
|
|
from network.connectionpool import BMConnectionPool
|
|
|
|
import protocol
|
2017-10-22 11:32:37 +02:00
|
|
|
from state import missingObjects
|
2017-06-21 12:16:33 +02:00
|
|
|
|
|
|
|
class DownloadThread(threading.Thread, StoppableThread):
    """Thread that issues 'getdata' requests to peers for objects we are missing."""

    # skip a peer that already has at least this many requests in flight
    minPending = 200
    # maximum number of objects to have pending per peer at a time
    requestChunk = 1000
    # seconds after which a pending request is considered timed out
    # and may be re-issued
    requestTimeout = 60
    # seconds between sweeps of expired entries in missingObjects
    cleanInterval = 60
    # seconds after which a missingObjects entry is dropped entirely
    requestExpires = 600

    def __init__(self):
        # The thread name is set through the Thread constructor; the old
        # redundant "self.name = ..." re-assignment has been removed.
        threading.Thread.__init__(self, name="Downloader")
        self.initStop()
        logger.info("init download thread")
        # timestamp of the last cleanPending() sweep
        self.lastCleaned = time.time()

    def cleanPending(self):
        """Drop entries from missingObjects whose request expired long ago."""
        deadline = time.time() - DownloadThread.requestExpires
        try:
            # iteritems() raises RuntimeError if another thread mutates the
            # dict mid-iteration; in that case just skip this sweep and let
            # the next one catch up.
            toDelete = [k for k, v in missingObjects.iteritems() if v < deadline]
        except RuntimeError:
            pass
        else:
            for i in toDelete:
                try:
                    del missingObjects[i]
                except KeyError:
                    # another thread already removed it; nothing to do
                    pass
            self.lastCleaned = time.time()

    def run(self):
        """Main loop: pick peers at random and request missing objects from them."""
        while not self._stopped:
            requested = 0
            # Choose downloading peers randomly
            connections = BMConnectionPool().inboundConnections.values() + \
                BMConnectionPool().outboundConnections.values()
            random.shuffle(connections)
            for i in connections:
                now = time.time()
                # requests older than this are considered timed out
                timedOut = now - DownloadThread.requestTimeout
                # this may take a while, but it needs a consistency
                # so I think it's better to lock a bigger chunk
                with i.objectsNewToMeLock:
                    try:
                        # count this peer's requests still outstanding
                        # (requested and not yet timed out)
                        downloadPending = len(list((
                            k for k, v in i.objectsNewToMe.iteritems()
                            if k in missingObjects
                            and missingObjects[k] > timedOut)))
                    except KeyError:
                        # missingObjects mutated concurrently between the
                        # membership test and the lookup; try the next peer
                        continue
                    if downloadPending >= DownloadThread.minPending:
                        continue
                    # objects this peer advertises that we have never
                    # requested, or whose previous request timed out
                    try:
                        request = list((
                            k for k, v in i.objectsNewToMe.iteritems()
                            if k not in missingObjects
                            or missingObjects[k] < timedOut))
                    except KeyError:
                        continue
                    random.shuffle(request)
                    if not request:
                        continue
                    if len(request) > DownloadThread.requestChunk - downloadPending:
                        request = request[:DownloadThread.requestChunk - downloadPending]
                    # mark them as pending
                    for k in request:
                        i.objectsNewToMe[k] = False
                        missingObjects[k] = now

                # build the getdata payload: varint count followed by the
                # raw inventory hashes
                payload = bytearray()
                payload.extend(addresses.encodeVarint(len(request)))
                for chunk in request:
                    payload.extend(chunk)
                i.append_write_buf(protocol.CreatePacket('getdata', payload))
                logger.debug(
                    "%s:%i Requesting %i objects",
                    i.destination.host, i.destination.port, len(request))
                requested += len(request)
            if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
                self.cleanPending()
            if not requested:
                # nothing to do right now; sleep briefly (woken early on stop)
                self.stop.wait(5)