MiNode/minode/manager.py

# -*- coding: utf-8 -*-
"""The main thread, managing connections, nodes and objects"""
import base64
import csv
import logging
import os
import pickle
import queue
import random
import threading
import time

from . import proofofwork, shared, structure
from .connection import Connection, SocksConnection
from .i2p import I2PDialer


class Manager(threading.Thread):
    """The manager thread"""

    def __init__(self):
        super().__init__(name='Manager')
        self.q = queue.Queue()
        self.last_cleaned_objects = time.time()
        self.last_cleaned_connections = time.time()
        self.last_pickled_objects = time.time()
        self.last_pickled_nodes = time.time()
        # Seed the timestamp so that run() publishes the I2P
        # destination 5-15 minutes after start (threshold is 1 hour)
        self.last_published_i2p_destination = \
            time.time() - 50 * 60 + random.uniform(-1, 1) * 300  # nosec B311
        # Publish the onion peer 4-8 minutes after the I2P
        # destination (threshold is 4 hours)
        self.last_published_onion_peer = \
            self.last_published_i2p_destination - 3 * 3600 + \
            random.uniform(1, 2) * 240  # nosec B311

    def run(self):
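        """Main loop: wake every 0.8 s, check the shutdown flag and
        fire each periodic task once its interval has elapsed."""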
        self.load_data()
        self.clean_objects()

        while True:
            time.sleep(0.8)
            now = time.time()
            if shared.shutting_down:
                logging.debug('Shutting down Manager')
                break
            if now - self.last_cleaned_objects > 90:
                self.clean_objects()
                self.last_cleaned_objects = now
            if now - self.last_cleaned_connections > 2:
                self.manage_connections()
                self.last_cleaned_connections = now
            if now - self.last_pickled_objects > 100:
                self.pickle_objects()
                self.last_pickled_objects = now
            if now - self.last_pickled_nodes > 60:
                self.pickle_nodes()
                self.last_pickled_nodes = now
            if now - self.last_published_i2p_destination > 3600:
                self.publish_i2p_destination()
                self.last_published_i2p_destination = now
            if now - self.last_published_onion_peer > 3600 * 4:
                self.publish_onion_peer()
                self.last_published_onion_peer = now

    @staticmethod
    def clean_objects():
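        """Remove objects that are no longer valid: expired ones are
        expected and logged at debug level, anything else failing
        validation is logged as a warning."""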
        for vector in set(shared.objects):
            if not shared.objects[vector].is_valid():
                if shared.objects[vector].is_expired():
                    logging.debug(
                        'Deleted expired object: %s',
                        base64.b16encode(vector).decode())
                else:
                    logging.warning(
                        'Deleted invalid object: %s',
                        base64.b16encode(vector).decode())
                with shared.objects_lock:
                    del shared.objects[vector]

    @staticmethod
    def manage_connections():
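        """Prune dead connections and dialers, then open new outgoing
        connections: the trusted peer if one is configured, otherwise
        random samples from the IP, onion and I2P node pools, skipping
        network groups we are already connected to."""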
        hosts = set()
        outgoing_connections = 0

        # Drop dead or disconnected connections,
        # count the outgoing ones that remain
        for c in shared.connections.copy():
            if not c.is_alive() or c.status == 'disconnected':
                with shared.connections_lock:
                    shared.connections.remove(c)
            else:
                hosts.add(structure.NetAddrNoPrefix.network_group(c.host))
                if not c.server:
                    outgoing_connections += 1

        # Prune finished I2P dialers
        for d in shared.i2p_dialers.copy():
            hosts.add(d.destination)
            if not d.is_alive():
                shared.i2p_dialers.remove(d)

        to_connect = set()
        if shared.trusted_peer:
            to_connect.add(shared.trusted_peer)

        if (
            outgoing_connections < shared.outgoing_connections
            and shared.send_outgoing_connections and not shared.trusted_peer
        ):
            if shared.ip_enabled:
                sample_length = 16
                if shared.tor:
                    # Prefer onion peers when running over Tor,
                    # halving the IP sample accordingly
                    if len(shared.onion_unchecked_pool) > 4:
                        to_connect.update(random.sample(
                            tuple(shared.onion_unchecked_pool), 4))
                    else:
                        to_connect.update(shared.onion_unchecked_pool)
                    shared.onion_unchecked_pool.difference_update(to_connect)
                    if len(shared.onion_pool) > 2:
                        to_connect.update(random.sample(
                            tuple(shared.onion_pool), 2))
                    else:
                        to_connect.update(shared.onion_pool)
                    sample_length = 8

                if len(shared.unchecked_node_pool) > sample_length:
                    to_connect.update(random.sample(
                        tuple(shared.unchecked_node_pool), sample_length))
                else:
                    to_connect.update(shared.unchecked_node_pool)
                shared.unchecked_node_pool.difference_update(to_connect)
                if len(shared.node_pool) > sample_length / 2:
                    to_connect.update(random.sample(
                        tuple(shared.node_pool), int(sample_length / 2)))
                else:
                    to_connect.update(shared.node_pool)

            if shared.i2p_enabled:
                if len(shared.i2p_unchecked_node_pool) > 16:
                    to_connect.update(random.sample(
                        tuple(shared.i2p_unchecked_node_pool), 16))
                else:
                    to_connect.update(shared.i2p_unchecked_node_pool)
                shared.i2p_unchecked_node_pool.difference_update(to_connect)
                if len(shared.i2p_node_pool) > 8:
                    to_connect.update(random.sample(
                        tuple(shared.i2p_node_pool), 8))
                else:
                    to_connect.update(shared.i2p_node_pool)

        for host, port in to_connect:
            group = structure.NetAddrNoPrefix.network_group(host)
            if group in hosts:
                # Already connected to this network group
                continue
            if port == 'i2p' and shared.i2p_enabled:
                if shared.i2p_session_nick and host != shared.i2p_dest_pub:
                    try:
                        d = I2PDialer(
                            shared,
                            host, shared.i2p_session_nick,
                            shared.i2p_sam_host, shared.i2p_sam_port)
                        d.start()
                        hosts.add(d.destination)
                        shared.i2p_dialers.add(d)
                    except Exception:
                        logging.warning(
                            'Exception while trying to establish'
                            ' an I2P connection', exc_info=True)
                else:
                    continue
            else:
                c = (Connection if not shared.socks_proxy
                     else SocksConnection)(host, port)
                c.start()
                hosts.add(group)
                with shared.connections_lock:
                    shared.connections.add(c)

        shared.hosts = hosts

    @staticmethod
    def load_data():
        """Loads initial nodes and data, stored in files between sessions"""
        try:
            with open(
                os.path.join(shared.data_directory, 'objects.pickle'), 'br'
            ) as src:
                shared.objects = pickle.load(src)
        except FileNotFoundError:
            pass  # first start
        except Exception:
            logging.warning(
                'Error while loading objects from disk.', exc_info=True)

        try:
            with open(
                os.path.join(shared.data_directory, 'nodes.pickle'), 'br'
            ) as src:
                shared.node_pool = pickle.load(src)
        except FileNotFoundError:
            pass
        except Exception:
            logging.warning(
                'Error while loading nodes from disk.', exc_info=True)

        try:
            with open(
                os.path.join(shared.data_directory, 'i2p_nodes.pickle'), 'br'
            ) as src:
                shared.i2p_node_pool = pickle.load(src)
        except FileNotFoundError:
            pass
        except Exception:
            logging.warning(
                'Error while loading I2P nodes from disk.', exc_info=True)

        try:
            with open(
                os.path.join(shared.data_directory, 'onion_nodes.pickle'), 'br'
            ) as src:
                shared.onion_pool = pickle.load(src)
        except FileNotFoundError:
            pass
        except Exception:
            logging.warning(
                'Error while loading onion nodes from disk.', exc_info=True)

        # Core nodes ship with the source and seed the pools
        with open(
            os.path.join(shared.source_directory, 'core_nodes.csv'),
            'r', newline='', encoding='ascii'
        ) as src:
            reader = csv.reader(src)
            shared.core_nodes = {(row[0], int(row[1])) for row in reader}
        shared.node_pool.update(shared.core_nodes)

        with open(
            os.path.join(shared.source_directory, 'i2p_core_nodes.csv'),
            'r', newline='', encoding='ascii'
        ) as f:
            reader = csv.reader(f)
            shared.i2p_core_nodes = {
                (row[0].encode(), 'i2p') for row in reader}
        shared.i2p_node_pool.update(shared.i2p_core_nodes)

    @staticmethod
    def pickle_objects():
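        """Save shared.objects to objects.pickle, holding the
        objects lock while dumping."""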
        try:
            with open(
                os.path.join(shared.data_directory, 'objects.pickle'), 'bw'
            ) as dst:
                with shared.objects_lock:
                    pickle.dump(shared.objects, dst, protocol=3)
            logging.debug('Saved objects')
        except Exception:
            logging.warning('Error while saving objects', exc_info=True)

    @staticmethod
    def pickle_nodes():
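        """Trim each node pool to a size cap, then save the known-good
        pools to nodes.pickle, i2p_nodes.pickle and onion_nodes.pickle."""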
        if len(shared.node_pool) > 10000:
            shared.node_pool = set(random.sample(
                tuple(shared.node_pool), 10000))
        if len(shared.unchecked_node_pool) > 1000:
            shared.unchecked_node_pool = set(random.sample(
                tuple(shared.unchecked_node_pool), 1000))
        if len(shared.i2p_node_pool) > 1000:
            shared.i2p_node_pool = set(random.sample(
                tuple(shared.i2p_node_pool), 1000))
        if len(shared.i2p_unchecked_node_pool) > 100:
            shared.i2p_unchecked_node_pool = set(random.sample(
                tuple(shared.i2p_unchecked_node_pool), 100))
        # tuple() added here as in the branches above:
        # random.sample() no longer accepts a set (removed in Python 3.11)
        if len(shared.onion_pool) > 1000:
            shared.onion_pool = set(random.sample(
                tuple(shared.onion_pool), 1000))
        if len(shared.onion_unchecked_pool) > 100:
            shared.onion_unchecked_pool = set(random.sample(
                tuple(shared.onion_unchecked_pool), 100))

        try:
            with open(
                os.path.join(shared.data_directory, 'nodes.pickle'), 'bw'
            ) as dst:
                pickle.dump(shared.node_pool, dst, protocol=3)
            with open(
                os.path.join(shared.data_directory, 'i2p_nodes.pickle'), 'bw'
            ) as dst:
                pickle.dump(shared.i2p_node_pool, dst, protocol=3)
            with open(
                os.path.join(shared.data_directory, 'onion_nodes.pickle'), 'bw'
            ) as dst:
                pickle.dump(shared.onion_pool, dst, protocol=3)
            logging.debug('Saved nodes')
        except Exception:
            logging.warning('Error while saving nodes', exc_info=True)

    @staticmethod
    def publish_i2p_destination():
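        """Wrap our raw I2P destination in an object expiring in two
        hours and hand it to the proof-of-work thread for publishing."""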
        if shared.i2p_session_nick and not shared.i2p_transient:
            logging.info('Publishing our I2P destination')
            dest_pub_raw = base64.b64decode(
                shared.i2p_dest_pub, altchars=b'-~')
            obj = structure.Object(
                b'\x00' * 8, int(time.time() + 2 * 3600),
                shared.i2p_dest_obj_type, shared.i2p_dest_obj_version,
                shared.stream, dest_pub_raw)
            proofofwork.do_pow_and_publish(obj)

    @staticmethod
    def publish_onion_peer():
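        """Publish an onion peer object for our hidden service,
        if one is configured."""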
        if shared.onion_hostname:
            logging.info('Publishing our onion peer')
            obj = structure.OnionPeer(
                shared.onion_hostname, shared.listening_port).to_object()
            proofofwork.do_pow_and_publish(obj)
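
# Usage sketch (an assumption about the wiring outside this file, not a
# verbatim excerpt): the main module starts this thread once at boot and
# stops it by flipping the shared flag that run() polls.
#
#     manager = Manager()
#     manager.start()
#     ...
#     shared.shutting_down = True  # run() notices the flag and exits
#     manager.join()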