Compare commits

55 Commits:

a2bf898bca, 6372578bb6, 1aca3e6931, ecfafcc385, 34d2e54d56, dba455464a,
260c839e26, 22f0282ef9, c2abf2879a, 73893dabf7, 8ffcf92380, 1f64719a56,
4daf047d51, c8510557f2, fb243dd8e9, 5f0ab21268, 9732f0c7f5, fe6a6af1db,
56e92a747a, cb239c7d68, ec4e24185a, d17de19aca, c5a1310083, a0537a9e66,
b1749c368c, 651a4f9681, ff0df70388, 947607937c, b465ddff85, d6de7c8d1e,
59fcd9eb2b, 1400486b22, fffb5e6052, cd6f82bc2a, 63700885a0, a10a905407,
bdfd39a163, 452fe8d5f1, 25cb745f05, 27e72d2027, 2b312c4255, b83262fe66,
1caf3288b2, fa50a844ad, b9e5b07e1b, d1b0f06ac1, 2147a6a716, 21fe906ac3,
2f4cb203c8, fd5c2c803d, 78e14c1e62, 13d1a94ddf, 16c8c412bf, 5ee3eec0ae,
4b38debc35
|
Dockerfile:

@@ -9,7 +9,8 @@ RUN apt-add-repository ppa:purplei2p/i2pd && apt-get update -qq
 RUN apt-get install -yq --no-install-suggests --no-install-recommends \
     python3-dev python3-pip python-is-python3 python3.11-dev python3.11-venv

-RUN apt-get install -yq --no-install-suggests --no-install-recommends sudo i2pd
+RUN apt-get install -yq --no-install-suggests --no-install-recommends \
+    sudo i2pd tor

 RUN echo 'builder ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
Start script:

@@ -1,3 +1,4 @@
 #!/bin/sh

 sudo service i2pd start
+sudo service tor start
README.md:

@@ -30,7 +30,7 @@ usage: main.py [-h] [-p PORT] [--host HOST] [--debug] [--data-dir DATA_DIR]
                [--connection-limit CONNECTION_LIMIT] [--i2p]
                [--i2p-tunnel-length I2P_TUNNEL_LENGTH]
                [--i2p-sam-host I2P_SAM_HOST] [--i2p-sam-port I2P_SAM_PORT]
-               [--i2p-transient]
+               [--i2p-transient] [--socks-proxy SOCKS_PROXY] [--tor]

 optional arguments:
   -h, --help            show this help message and exit
@@ -53,6 +53,10 @@ optional arguments:
   --i2p-sam-port I2P_SAM_PORT
                         Port of I2P SAMv3 bridge
   --i2p-transient       Generate new I2P destination on start
+  --socks-proxy SOCKS_PROXY
+                        SOCKS proxy address in the form <HOST>:<PORT>
+  --tor                 The SOCKS proxy is tor, use 127.0.0.1:9050 if not
+                        specified, start tor and setup a hidden service
 ```
minode/connection.py:

@@ -4,7 +4,9 @@ import base64
 import errno
 import logging
 import math
+import os
 import random
+import re
 import select
 import socket
 import ssl
@@ -20,22 +22,19 @@ class ConnectionBase(threading.Thread):
     Common code for the connection thread
     with minimum command handlers to reuse
     """
-    def __init__(
-        self, host, port, s=None, network='ip', server=False,
-        i2p_remote_dest=b''
-    ):
+    def __init__(self, host, port, s=None, server=False):
        self.host = host
        self.port = port
-        self.network = network
-        self.i2p_remote_dest = i2p_remote_dest
-
-        if self.network == 'i2p':
-            self.host_print = self.i2p_remote_dest[:8].decode()
-        else:
-            self.host_print = self.host
+        self.network = 'i2p' if port == 'i2p' else 'ip'
+        self.host_print = (
+            self.host[:8].decode() if self.network == 'i2p' else self.host)

        super().__init__(name='Connection to {}:{}'.format(host, port))

+        self.s = s
+        self.server = server
+
        self.send_queue = queue.Queue()

        self.vectors_to_get = set()
@@ -43,22 +42,13 @@ class ConnectionBase(threading.Thread):

        self.vectors_requested = {}

-        self.status = 'ready'
-
-        self.tls = False
+        self.status = 'connected' if bool(s) else 'ready'

        self.verack_received = False
        self.verack_sent = False

-        self.s = s
-
        self.remote_version = None

-        self.server = server
-
-        if bool(s):
-            self.status = 'connected'
-
        self.buffer_receive = b''
        self.buffer_send = b''
@@ -78,9 +68,14 @@ class ConnectionBase(threading.Thread):
        self.s.settimeout(0)
        if not self.server:
            if self.network == 'ip':
-                self.send_queue.put(message.Version(self.host, self.port))
+                version_kwargs = (
+                    {'services': 1} if self.host.endswith('.onion') else {})
+                self.send_queue.put(message.Version(
+                    ('127.0.0.1' if shared.socks_proxy else self.host),
+                    self.port, **version_kwargs))
            else:
-                self.send_queue.put(message.Version('127.0.0.1', 7656))
+                self.send_queue.put(message.Version(
+                    '127.0.0.1', 7656, nonce=self._get_nonce()))
        while True:
            if (
                self.on_connection_fully_established_scheduled
@@ -149,6 +144,14 @@ class ConnectionBase(threading.Thread):
                break
            time.sleep(0.2)

+    def _get_nonce(self):
+        nonce = shared.nonce_pool.get(('127.0.0.1', 8448))
+        if nonce is None:
+            nonce = os.urandom(8)
+            shared.nonce_pool[('127.0.0.1', 8448)] = nonce
+
+        return nonce
+
    def _connect(self):
        peer_str = '{0.host_print}:{0.port}'.format(self)
        logging.debug('Connecting to %s', peer_str)
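A note on the `_get_nonce` helper added above: every SAM-bridged I2P connection is made to 127.0.0.1, so one reusable random nonce is kept in `shared.nonce_pool` under the key ('127.0.0.1', 8448). A minimal standalone sketch of the idea (names here are illustrative, not the module's own):

import os

nonce_pool = {}  # (host, port) -> 8-byte nonce, as in shared.nonce_pool

def get_nonce(key=('127.0.0.1', 8448)):
    """Reuse one random nonce for all local (SAM-bridged) connections."""
    nonce = nonce_pool.get(key)
    if nonce is None:
        nonce = os.urandom(8)
        nonce_pool[key] = nonce
    return nonce

assert get_nonce() == get_nonce()  # stable within a run

A peer that echoes this nonce back in its version message is then recognized in `_process_msg_version` below as ourselves (or a duplicate) and disconnected.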
@@ -238,7 +241,7 @@ class ConnectionBase(threading.Thread):
            logging.debug('ssl.SSLError reason: %s', e.reason)
            shared.node_pool.discard((self.host, self.port))
            return
-        self.tls = True
+
        logging.debug(
            'Established TLS connection with %s:%s (%s)',
            self.host_print, self.port, self.s.version())
@@ -257,12 +260,17 @@ class ConnectionBase(threading.Thread):
            'Established Bitmessage protocol connection to %s:%s',
            self.host_print, self.port)
        self.on_connection_fully_established_scheduled = False
-        if self.remote_version.services & 2 and self.network == 'ip':
-            self._do_tls_handshake()  # NODE_SSL
+        if (  # NODE_SSL
+            self.remote_version.services & 2 and self.network == 'ip'
+            and not self.host.endswith('.onion')
+            and not (self.server and shared.tor)
+        ):
+            self._do_tls_handshake()

        addr = {
            structure.NetAddr(c.remote_version.services, c.host, c.port)
            for c in shared.connections if c.network != 'i2p'
+            and not c.host.endswith('.onion')
            and c.server is False and c.status == 'fully_established'}
        # pylint: disable=unsubscriptable-object
        # https://github.com/pylint-dev/pylint/issues/3637
@@ -277,22 +285,11 @@ class ConnectionBase(threading.Thread):
        if len(addr) != 0:
            self.send_queue.put(message.Addr(addr))

-        with shared.objects_lock:
-            if len(shared.objects) > 0:
-                to_send = {
-                    vector for vector in shared.objects.keys()
-                    if shared.objects[vector].expires_time > time.time()}
-                while len(to_send) > 0:
-                    if len(to_send) > 10000:
-                        # We limit size of inv messaged to 10000 entries
-                        # because they might time out
-                        # in very slow networks (I2P)
-                        pack = random.sample(tuple(to_send), 10000)
-                        self.send_queue.put(message.Inv(pack))
-                        to_send.difference_update(pack)
-                    else:
-                        self.send_queue.put(message.Inv(to_send))
-                        to_send.clear()
+        if shared.objects:
+            for chunk in shared.objects.biginv_chunks(10000):
+                # We limit size of inv messages to 10000 entries
+                # because they might time out in very slow networks (I2P)
+                self.send_queue.put(message.Inv(chunk))
        self.status = 'fully_established'

    def _process_queue(self):
@@ -377,46 +374,64 @@ class ConnectionBase(threading.Thread):
        if shared.stream not in version.streams:
            raise ValueError('message not for stream %i' % shared.stream)
        logging.debug('%s:%s -> %s', self.host_print, self.port, version)
+        nonce_print = base64.b16encode(version.nonce).decode()
        if (
            version.protocol_version != shared.protocol_version
            or version.nonce == shared.nonce
+            or version.nonce in shared.nonce_pool.values()
        ):
+            logging.warning(
+                'Disconnecting v%s node %s with nonce %s',
+                version.protocol_version, self.host_print, nonce_print)
+            shared.unchecked_node_pool.discard((self.host, self.port))
            self.status = 'disconnecting'
            self.send_queue.put(None)
        else:
+            shared.nonce_pool[(self.host, self.port)] = version.nonce
            logging.info(
-                '%s:%s claims to be %s',
-                self.host_print, self.port, version.user_agent)
+                '%s:%s claims to be %s (%s)',
+                self.host_print, self.port, version.user_agent, nonce_print)
            self.send_queue.put(message.Message(b'verack', b''))
            self.verack_sent = True
            self.remote_version = version
            if not self.server:
                self.send_queue.put('fully_established')
                if self.network == 'ip':
-                    shared.address_advertise_queue.put(structure.NetAddr(
-                        version.services, self.host, self.port))
-                    shared.node_pool.add((self.host, self.port))
+                    if self.host.endswith('.onion'):
+                        shared.onion_pool.add((self.host, self.port))
+                    else:
+                        shared.address_advertise_queue.put(structure.NetAddr(
+                            version.services, self.host, self.port))
+                        shared.node_pool.add((self.host, self.port))
                elif self.network == 'i2p':
                    shared.i2p_node_pool.add((self.host, 'i2p'))
-            if self.network == 'ip':
+            if (
+                self.network == 'ip' and shared.listen_for_connections
+                and version.host != '127.0.0.1'
+            ):
                shared.address_advertise_queue.put(structure.NetAddr(
                    shared.services, version.host, shared.listening_port))
            if self.server:
                if self.network == 'ip':
-                    self.send_queue.put(message.Version(self.host, self.port))
+                    version_kwargs = {'services': 1} if shared.tor else {}
+                    self.send_queue.put(message.Version(
+                        self.host, self.port, **version_kwargs))
                else:
-                    self.send_queue.put(message.Version('127.0.0.1', 7656))
+                    self.send_queue.put(message.Version(
+                        '127.0.0.1', 7656, nonce=self._get_nonce()))

    def _process_msg_addr(self, m):
        addr = message.Addr.from_message(m)
        logging.debug('%s:%s -> %s', self.host_print, self.port, addr)
        for a in addr.addresses:
+            if not a.host or a.port == 0:
+                continue
            if (a.host, a.port) not in shared.core_nodes:
                shared.unchecked_node_pool.add((a.host, a.port))

    def _request_objects(self):
        if self.vectors_to_get and len(self.vectors_requested) < 100:
-            self.vectors_to_get.difference_update(shared.objects.keys())
+            self.vectors_to_get = shared.objects.select(self.vectors_to_get)
            if not self.wait_until:
                nodes_count = (
                    len(shared.node_pool) + len(shared.unchecked_node_pool))
@@ -432,13 +447,13 @@ class ConnectionBase(threading.Thread):
                self.send_queue.put(message.GetData(pack))
                self.vectors_requested.update({
                    vector: time.time() for vector in pack
-                    if vector not in self.vectors_requested})
+                })
                self.vectors_to_get.difference_update(pack)
            else:
                self.send_queue.put(message.GetData(self.vectors_to_get))
                self.vectors_requested.update({
                    vector: time.time() for vector in self.vectors_to_get
-                    if vector not in self.vectors_requested})
+                })
                self.vectors_to_get.clear()
        if self.vectors_requested:
            self.vectors_requested = {
@@ -463,12 +478,10 @@ class ConnectionBase(threading.Thread):
        else:
            to_send = self.vectors_to_send.copy()
            self.vectors_to_send.clear()
-        with shared.objects_lock:
-            for vector in to_send:
-                obj = shared.objects.get(vector, None)
-                if obj:
-                    self.send_queue.put(
-                        message.Message(b'object', obj.to_bytes()))
+        for vector in to_send:
+            obj = shared.objects.get(vector)
+            if obj:
+                self.send_queue.put(message.Message(b'object', obj.data))


 class Connection(ConnectionBase):
@@ -476,9 +489,7 @@ class Connection(ConnectionBase):
    def _process_msg_inv(self, m):
        inv = message.Inv.from_message(m)
        logging.debug('%s:%s -> %s', self.host_print, self.port, inv)
-        to_get = inv.vectors.copy()
-        to_get.difference_update(shared.objects.keys())
-        self.vectors_to_get.update(to_get)
+        self.vectors_to_get.update(shared.objects.select(inv.vectors))
        # Do not send objects they already have.
        self.vectors_to_send.difference_update(inv.vectors)
@@ -487,9 +498,8 @@ class Connection(ConnectionBase):
        logging.debug('%s:%s -> %s', self.host_print, self.port, obj)
        self.vectors_requested.pop(obj.vector, None)
        self.vectors_to_get.discard(obj.vector)
-        if obj.is_valid() and obj.vector not in shared.objects:
-            with shared.objects_lock:
-                shared.objects[obj.vector] = obj
+        if obj.is_valid():
+            shared.objects[obj.vector] = obj
            if (
                obj.object_type == shared.i2p_dest_obj_type
                and obj.version == shared.i2p_dest_obj_version
@@ -500,7 +510,15 @@ class Connection(ConnectionBase):
                    ' adding to i2p_unchecked_node_pool')
                logging.debug(dest)
                shared.i2p_unchecked_node_pool.add((dest, 'i2p'))
+            elif (
+                obj.object_type == shared.onion_obj_type
+                and obj.version == shared.onion_obj_version
+            ):
+                peer = structure.OnionPeer.from_object(obj)
+                logging.debug('Received onion peer object: %s', peer)
+                shared.onion_unchecked_pool.add((peer.host, peer.port))
            shared.vector_advertise_queue.put(obj.vector)
+        shared.objects.check(obj.vector)

    def _process_msg_getdata(self, m):
        getdata = message.GetData.from_message(m)
@@ -517,4 +535,46 @@ class Bootstrapper(ConnectionBase):
        self.send_queue.put(None)


+class SocksConnection(Connection):
+    """The socks proxied connection"""
+    def _connect(self):
+        peer_str = '{0.host_print}:{0.port}'.format(self)
+        logging.debug('Connecting to %s', peer_str)
+
+        import socks  # pylint: disable=import-outside-toplevel
+
+        proxy_type = socks.PROXY_TYPES[shared.socks_proxy.scheme.upper()]
+
+        try:
+            self.s = socks.create_connection(
+                (self.host, self.port), 30, None, proxy_type,
+                shared.socks_proxy.hostname, shared.socks_proxy.port, True,
+                shared.socks_proxy.username, shared.socks_proxy.password, None)
+            self.status = 'connected'
+            logging.debug('Established SOCKS connection to %s', peer_str)
+        except socket.timeout:
+            pass
+        except socks.GeneralProxyError as e:
+            e = e.socket_err
+            if isinstance(e, socket.timeout) or (
+                # general failure, unreachable, refused
+                not e.errno and re.match(r'^0x0[1,4,5].*', e.msg)
+            ):
+                logcall = logging.debug
+            else:
+                logcall = logging.info
+            logcall('Connection to %s failed. Reason: %s', peer_str, e)
+        except OSError as e:
+            # unreachable, refused, no route
+            (logging.info if e.errno not in (0, 101, 111, 113)
+             else logging.debug)(
+                'Connection to %s failed. Reason: %s', peer_str, e)
+        except Exception:
+            logging.info(
+                'Connection to %s failed.', peer_str, exc_info=True)
+
+        if self.status != 'connected':
+            self.status = 'failed'
+
+
 shared.connection = Connection
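For readers unfamiliar with PySocks, the positional call in `SocksConnection._connect` above maps onto `socks.create_connection` keywords roughly as follows. This is a sketch assuming the PySocks API; the onion address is a placeholder:

import socks

s = socks.create_connection(
    ('example1234567890.onion', 8444),  # dest_pair (placeholder address)
    timeout=30,
    source_address=None,
    proxy_type=socks.PROXY_TYPES['SOCKS5'],
    proxy_addr='127.0.0.1',
    proxy_port=9050,
    proxy_rdns=True,   # resolve hostnames on the proxy side; required for
                       # .onion names, which only tor itself can resolve
    proxy_username=None,
    proxy_password=None,
)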
minode/i2p/dialer.py:

@@ -7,14 +7,15 @@ from .util import I2PThread

 class I2PDialer(I2PThread):
    def __init__(
-        self, state, destination, nick, sam_host='127.0.0.1', sam_port=7656
+        self, state, destination, nick=None, *, sam_host=None, sam_port=None
    ):

-        self.sam_host = sam_host
-        self.sam_port = sam_port
-
-        self.nick = nick
+        # Initially 127.0.0.1:7656
+        self.sam_host = sam_host or state.i2p_sam_host
+        self.sam_port = sam_port or state.i2p_sam_port
        self.destination = destination
+        self.nick = nick or state.i2p_session_nick

        super().__init__(state, name='I2P Dial to {}'.format(self.destination))

@@ -27,9 +28,7 @@ class I2PDialer(I2PThread):
        logging.debug('Connecting to %s', self.destination)
        self._connect()
        if not self.state.shutting_down and self.success:
-            c = self.state.connection(
-                self.destination, 'i2p', self.s, 'i2p',
-                False, self.destination)
+            c = self.state.connection(self.destination, 'i2p', self.s, False)
            c.start()
            self.state.connections.add(c)
minode/i2p/listener.py:

@@ -15,8 +15,6 @@ class I2PListener(I2PThread):

        self.version_reply = []

-        self.new_socket()
-
    def new_socket(self):
        self.s = socket.create_connection((self.host, self.port))
        self._send(b'HELLO VERSION MIN=3.0 MAX=3.3\n')
@@ -31,26 +29,31 @@ class I2PListener(I2PThread):

    def run(self):
        while not self.state.shutting_down:
+            self.new_socket()
+            duplicate = False
            try:
                destination = self._receive_line().split()[0]
                logging.info(
                    'Incoming I2P connection from: %s', destination.decode())
-
-                hosts = set()
-                for c in self.state.connections.copy():
-                    hosts.add(c.host)
-                for d in self.state.i2p_dialers.copy():
-                    hosts.add(d.destination)
-                if destination in hosts:
-                    logging.debug('Rejecting duplicate I2P connection.')
-                    self.s.close()
-                else:
-                    c = self.state.connection(
-                        destination, 'i2p', self.s, 'i2p', True, destination)
-                    c.start()
-                    self.state.connections.add(c)
-                    c = None
-                self.new_socket()
            except socket.timeout:
-                pass
+                continue
+
+            for c in self.state.connections.copy():
+                if c.host == destination:
+                    duplicate = True
+                    break
+            else:
+                for d in self.state.i2p_dialers.copy():
+                    if d.destination == destination:
+                        duplicate = True
+                        break
+            if duplicate:
+                logging.info('Rejecting duplicate I2P connection.')
+                self.s.close()
+            else:
+                c = self.state.connection(destination, 'i2p', self.s, True)
+                c.start()
+                self.state.connections.add(c)
+                c = None

        logging.debug('Shutting down I2P Listener')
minode/main.py:

@@ -5,10 +5,17 @@ import base64
 import logging
 import multiprocessing
 import os
+import re
 import signal
 import socket
+from urllib import parse

-from . import i2p, shared
+try:
+    import socks
+except ImportError:
+    socks = None
+
+from . import i2p, shared, sql
 from .advertiser import Advertiser
 from .manager import Manager
 from .listener import Listener
@@ -52,6 +59,16 @@ def parse_arguments():  # pylint: disable=too-many-branches,too-many-statements
        '--i2p-transient', action='store_true',
        help='Generate new I2P destination on start')

+    if socks is not None:
+        parser.add_argument(
+            '--socks-proxy',
+            help='SOCKS proxy address in the form <HOST>:<PORT>')
+        parser.add_argument(
+            '--tor', action='store_true',
+            help='The SOCKS proxy is tor, use 127.0.0.1:9050 if not specified,'
+            ' start tor and setup a hidden service'
+        )
+
    args = parser.parse_args()
    if args.port:
        shared.listening_port = args.port
@@ -71,7 +88,8 @@ def parse_arguments():  # pylint: disable=too-many-branches,too-many-statements
    if args.no_ip:
        shared.ip_enabled = False
    if args.trusted_peer:
-        if len(args.trusted_peer) > 50:
+        if len(args.trusted_peer
+               ) > 50 and not args.trusted_peer.endswith('onion'):
            # I2P
            shared.trusted_peer = (args.trusted_peer.encode(), 'i2p')
        else:
@@ -99,6 +117,17 @@ def parse_arguments():  # pylint: disable=too-many-branches,too-many-statements
    if args.i2p_transient:
        shared.i2p_transient = True

+    if socks is None:
+        return
+    if args.tor:
+        shared.tor = True
+        if not args.socks_proxy:
+            args.socks_proxy = '127.0.0.1:9050'
+    if args.socks_proxy:
+        if not re.match(r'^.*://', args.socks_proxy):
+            args.socks_proxy = '//' + args.socks_proxy
+        shared.socks_proxy = parse.urlparse(args.socks_proxy, scheme='socks5')
+

 def bootstrap_from_dns():
    """Addes addresses of bootstrap servers to core nodes"""
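The `'//' +` prepended above matters: `urllib.parse.urlparse` only treats HOST:PORT as a network location when it follows `//`, otherwise `.hostname` and `.port` stay empty. A quick illustration of the stdlib behaviour:

from urllib import parse

parse.urlparse('127.0.0.1:9050', scheme='socks5').hostname   # None
proxy = parse.urlparse('//127.0.0.1:9050', scheme='socks5')
(proxy.scheme, proxy.hostname, proxy.port)  # ('socks5', '127.0.0.1', 9050)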
@@ -161,10 +190,9 @@ def start_ip_listener():
 def start_i2p_listener():
    """Starts I2P threads"""
    # Grab I2P destinations from old object file
-    for obj in shared.objects.values():
-        if obj.object_type == shared.i2p_dest_obj_type:
-            shared.i2p_unchecked_node_pool.add((
-                base64.b64encode(obj.object_payload, altchars=b'-~'), 'i2p'))
+    for obj in shared.objects.filter(object_type=shared.i2p_dest_obj_type):
+        shared.i2p_unchecked_node_pool.add((
+            base64.b64encode(obj.object_payload, altchars=b'-~'), 'i2p'))

    dest_priv = b''

@@ -242,9 +270,34 @@ def main():
                'Error while creating data directory in: %s',
                shared.data_directory, exc_info=True)

-    if shared.ip_enabled and not shared.trusted_peer:
+    if shared.socks_proxy and shared.send_outgoing_connections:
+        try:
+            socks.PROXY_TYPES[shared.socks_proxy.scheme.upper()]
+        except KeyError:
+            logging.error('Unsupported proxy schema!')
+            return
+
+    if shared.tor:
+        try:
+            from . import tor  # pylint: disable=import-outside-toplevel
+            if not tor.start_tor_service():
+                logging.warning('The tor service has not started.')
+                tor = None
+        except ImportError:
+            logging.info('Failed to import tor module.', exc_info=True)
+            tor = None
+
+        if not tor:
+            try:
+                socket.socket().bind(('127.0.0.1', 9050))
+                return
+            except (OSError, socket.error):
+                pass
+    elif shared.ip_enabled and not shared.trusted_peer:
        bootstrap_from_dns()

+    shared.objects = sql.Inventory()
+
    if shared.i2p_enabled:
        # We are starting it before cleaning expired objects
        # so we can collect I2P destination objects
minode/manager.py:

@@ -11,7 +11,7 @@ import threading
 import time

 from . import proofofwork, shared, structure
-from .connection import Bootstrapper, Connection
+from .connection import Bootstrapper, Connection, SocksConnection
 from .i2p import I2PDialer

@@ -23,11 +23,14 @@ class Manager(threading.Thread):
        self.bootstrap_pool = []
        self.last_cleaned_objects = time.time()
        self.last_cleaned_connections = time.time()
-        self.last_pickled_objects = time.time()
        self.last_pickled_nodes = time.time()
        # Publish destination 5-15 minutes after start
        self.last_published_i2p_destination = \
            time.time() - 50 * 60 + random.uniform(-1, 1) * 300  # nosec B311
+        # Publish onion 4-8 min later
+        self.last_published_onion_peer = \
+            self.last_published_i2p_destination - 3 * 3600 + \
+            random.uniform(1, 2) * 240  # nosec B311

    def fill_bootstrap_pool(self):
        """Populate the bootstrap pool by core nodes and checked ones"""
@@ -36,41 +39,30 @@ class Manager(threading.Thread):

    def run(self):
        self.load_data()
-        self.clean_objects()
+        shared.objects.cleanup()
        self.fill_bootstrap_pool()
        while True:
            time.sleep(0.8)
            now = time.time()
            if shared.shutting_down:
                logging.debug('Shutting down Manager')
+                shared.objects.flush()
                break
            if now - self.last_cleaned_objects > 90:
-                self.clean_objects()
+                shared.objects.cleanup()
                self.last_cleaned_objects = now
            if now - self.last_cleaned_connections > 2:
                self.manage_connections()
                self.last_cleaned_connections = now
-            if now - self.last_pickled_objects > 100:
-                self.pickle_objects()
-                self.last_pickled_objects = now
            if now - self.last_pickled_nodes > 60:
                self.pickle_nodes()
                self.last_pickled_nodes = now
            if now - self.last_published_i2p_destination > 3600:
                self.publish_i2p_destination()
                self.last_published_i2p_destination = now
+            if now - self.last_published_onion_peer > 3600 * 4:
+                self.publish_onion_peer()
+                self.last_published_onion_peer = now

-    @staticmethod
-    def clean_objects():
-        """Remove expired objects"""
-        for vector in set(shared.objects):
-            # FIXME: no need to check is_valid() here
-            if shared.objects[vector].is_expired():
-                logging.debug(
-                    'Deleted expired object: %s',
-                    base64.b16encode(vector).decode())
-                with shared.objects_lock:
-                    del shared.objects[vector]

    def manage_connections(self):
        """Open new connections if needed, remove closed ones"""
@@ -96,13 +88,20 @@ class Manager(threading.Thread):
                    self.fill_bootstrap_pool()
                    return
                logging.info('Starting a bootstrapper for %s:%s', *target)
+                shared.node_pool.discard(target)
                connect(target, Bootstrapper)

        outgoing_connections = 0
        for c in shared.connections.copy():
            if not c.is_alive() or c.status == 'disconnected':
+                shared.objects.check(
+                    *(c.vectors_to_get | c.vectors_requested.keys()))
                with shared.connections_lock:
                    shared.connections.remove(c)
+                try:
+                    del shared.nonce_pool[(c.host, c.port)]
+                except KeyError:
+                    pass
            else:
                hosts.add(structure.NetAddrNoPrefix.network_group(c.host))
                if not c.server:
@@ -123,17 +122,31 @@ class Manager(threading.Thread):
        ):

            if shared.ip_enabled:
-                if len(shared.unchecked_node_pool) > 16:
+                sample_length = 16
+                if shared.tor:
+                    if len(shared.onion_unchecked_pool) > 4:
+                        to_connect.update(random.sample(
+                            tuple(shared.onion_unchecked_pool), 4))
+                    else:
+                        to_connect.update(shared.onion_unchecked_pool)
+                    shared.onion_unchecked_pool.difference_update(to_connect)
+                    if len(shared.onion_pool) > 2:
+                        to_connect.update(random.sample(
+                            tuple(shared.onion_pool), 2))
+                    else:
+                        to_connect.update(shared.onion_pool)
+                    sample_length = 8
+                if len(shared.unchecked_node_pool) > sample_length:
                    to_connect.update(random.sample(
-                        tuple(shared.unchecked_node_pool), 16))
+                        tuple(shared.unchecked_node_pool), sample_length))
                else:
                    to_connect.update(shared.unchecked_node_pool)
                if outgoing_connections < shared.outgoing_connections / 2:
                    bootstrap()
                shared.unchecked_node_pool.difference_update(to_connect)
-                if len(shared.node_pool) > 8:
+                if len(shared.node_pool) > sample_length / 2:
                    to_connect.update(random.sample(
-                        tuple(shared.node_pool), 8))
+                        tuple(shared.node_pool), int(sample_length / 2)))
                else:
                    to_connect.update(shared.node_pool)

@@ -157,10 +170,7 @@ class Manager(threading.Thread):
            if port == 'i2p' and shared.i2p_enabled:
                if shared.i2p_session_nick and host != shared.i2p_dest_pub:
                    try:
-                        d = I2PDialer(
-                            shared,
-                            host, shared.i2p_session_nick,
-                            shared.i2p_sam_host, shared.i2p_sam_port)
+                        d = I2PDialer(shared, host)
                        d.start()
                        hosts.add(d.destination)
                        shared.i2p_dialers.add(d)
@@ -171,23 +181,14 @@ class Manager(threading.Thread):
                else:
                    continue
            else:
-                connect((host, port))
+                connect(
+                    (host, port),
+                    Connection if not shared.socks_proxy else SocksConnection)
            hosts.add(group)

    @staticmethod
    def load_data():
        """Load initial nodes and data, stored in files between sessions"""
-        try:
-            with open(
-                os.path.join(shared.data_directory, 'objects.pickle'), 'br'
-            ) as src:
-                shared.objects = pickle.load(src)
-        except FileNotFoundError:
-            pass  # first start
-        except Exception:
-            logging.warning(
-                'Error while loading objects from disk.', exc_info=True)
-
        try:
            with open(
                os.path.join(shared.data_directory, 'nodes.pickle'), 'br'
@@ -210,6 +211,17 @@ class Manager(threading.Thread):
            logging.warning(
                'Error while loading nodes from disk.', exc_info=True)

+        try:
+            with open(
+                os.path.join(shared.data_directory, 'onion_nodes.pickle'), 'br'
+            ) as src:
+                shared.onion_pool = pickle.load(src)
+        except FileNotFoundError:
+            pass
+        except Exception:
+            logging.warning(
+                'Error while loading nodes from disk.', exc_info=True)
+
        with open(
            os.path.join(shared.source_directory, 'core_nodes.csv'),
            'r', newline='', encoding='ascii'
@@ -227,19 +239,6 @@ class Manager(threading.Thread):
            (row[0].encode(), 'i2p') for row in reader}
        shared.i2p_node_pool.update(shared.i2p_core_nodes)

-    @staticmethod
-    def pickle_objects():
-        """Save objects into a file objects.pickle in the data directory"""
-        try:
-            with open(
-                os.path.join(shared.data_directory, 'objects.pickle'), 'bw'
-            ) as dst:
-                with shared.objects_lock:
-                    pickle.dump(shared.objects, dst, protocol=3)
-                logging.debug('Saved objects')
-        except Exception:
-            logging.warning('Error while saving objects', exc_info=True)
-
    @staticmethod
    def pickle_nodes():
        """Save nodes into files in the data directory"""
@@ -257,6 +256,13 @@ class Manager(threading.Thread):
            shared.i2p_unchecked_node_pool = set(random.sample(
                tuple(shared.i2p_unchecked_node_pool), 100))

+        if len(shared.onion_pool) > 1000:
+            shared.onion_pool = set(
+                random.sample(shared.onion_pool, 1000))
+        if len(shared.onion_unchecked_pool) > 100:
+            shared.onion_unchecked_pool = set(
+                random.sample(shared.onion_unchecked_pool, 100))
+
        try:
            with open(
                os.path.join(shared.data_directory, 'nodes.pickle'), 'bw'
@@ -266,7 +272,11 @@ class Manager(threading.Thread):
                os.path.join(shared.data_directory, 'i2p_nodes.pickle'), 'bw'
            ) as dst:
                pickle.dump(shared.i2p_node_pool, dst, protocol=3)
-            logging.debug('Saved nodes')
+            with open(
+                os.path.join(shared.data_directory, 'onion_nodes.pickle'), 'bw'
+            ) as dst:
+                pickle.dump(shared.onion_pool, dst, protocol=3)
+                logging.debug('Saved nodes')
        except Exception:
            logging.warning('Error while saving nodes', exc_info=True)

@@ -278,7 +288,16 @@ class Manager(threading.Thread):
            dest_pub_raw = base64.b64decode(
                shared.i2p_dest_pub, altchars=b'-~')
            obj = structure.Object(
-                b'\x00' * 8, int(time.time() + 2 * 3600),
+                int(time.time() + 2 * 3600),
                shared.i2p_dest_obj_type, shared.i2p_dest_obj_version,
-                shared.stream, dest_pub_raw)
+                shared.stream, object_payload=dest_pub_raw)
+            proofofwork.do_pow_and_publish(obj)
+
+    @staticmethod
+    def publish_onion_peer():
+        """Make and publish a `structure.OnionPeer`"""
+        if shared.onion_hostname:
+            logging.info('Publishing our onion peer')
+            obj = structure.OnionPeer(
+                shared.onion_hostname, shared.listening_port).to_object()
            proofofwork.do_pow_and_publish(obj)
minode/message.py:

@@ -114,9 +114,10 @@ def _payload_read_int(data):
 class Version(IMessage):
    """The version message payload"""
    def __init__(
-        self, host, port, protocol_version=shared.protocol_version,
-        services=shared.services, nonce=shared.nonce,
-        user_agent=shared.user_agent, streams=None
+        self, host, port,
+        nonce=shared.nonce, services=shared.services,
+        *, streams=None, user_agent=shared.user_agent,
+        protocol_version=shared.protocol_version,
    ):
        self.host = host
        self.port = port
@@ -189,7 +190,8 @@ class Version(IMessage):
            raise ValueError('malformed Version message, wrong streams_count')

        return cls(
-            host, port, protocol_version, services, nonce, user_agent, streams)
+            host, port, nonce, services, streams=streams,
+            protocol_version=protocol_version, user_agent=user_agent)


 class Inv(IMessage):
minode/proofofwork.py:

@@ -19,8 +19,13 @@ def _pow_worker(target, initial_hash, q):

    while trial_value > target:
        nonce += 1
-        trial_value = struct.unpack('>Q', hashlib.sha512(hashlib.sha512(
-            struct.pack('>Q', nonce) + initial_hash).digest()).digest()[:8])[0]
+        try:
+            trial_value = struct.unpack('>Q', hashlib.sha512(hashlib.sha512(
+                struct.pack('>Q', nonce) + initial_hash
+            ).digest()).digest()[:8])[0]
+        except KeyboardInterrupt:
+            q.put(None)
+            return

    q.put(struct.pack('>Q', nonce))
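Why the new try/except: on POSIX, Ctrl-C delivers SIGINT to the whole foreground process group, so the multiprocessing child running `_pow_worker` raises `KeyboardInterrupt` too. Putting `None` on the queue unblocks the parent's `q.get()` instead of leaving it waiting forever. A minimal sketch of the pattern:

import multiprocessing
import time

def worker(q):
    try:
        time.sleep(3600)   # stand-in for the PoW loop
    except KeyboardInterrupt:
        q.put(None)        # signal the parent instead of dying silently

if __name__ == '__main__':
    q = multiprocessing.Queue()
    multiprocessing.Process(target=worker, args=(q,)).start()
    # the parent's q.get() now always returns: a nonce, or None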
@@ -36,17 +41,21 @@ def _worker(obj):
    nonce = q.get()
    p.join()

+    if nonce is None:
+        if not shared.shutting_down:
+            logging.warning('Got None nonce from _pow_worker!')
+        return
+
    logging.debug(
        'Finished doing POW, nonce: %s, time: %ss', nonce, time.time() - t)
    obj = structure.Object(
-        nonce, obj.expires_time, obj.object_type, obj.version,
-        obj.stream_number, obj.object_payload)
+        obj.expires_time, obj.object_type, obj.version, obj.stream_number,
+        object_payload=obj.object_payload, nonce=nonce)
    logging.debug(
        'Object vector is %s', base64.b16encode(obj.vector).decode())

-    with shared.objects_lock:
-        shared.objects[obj.vector] = obj
-        shared.vector_advertise_queue.put(obj.vector)
+    shared.objects[obj.vector] = obj
+    shared.vector_advertise_queue.put(obj.vector)


 def do_pow_and_publish(obj):
minode/shared.py:

@@ -21,11 +21,17 @@ protocol_version = 3
 services = 3  # NODE_NETWORK, NODE_SSL
 stream = 1
 nonce = os.urandom(8)
-user_agent = b'/MiNode:0.3.3/'
+user_agent = b'/MiNode:0.3.5/'
 timeout = 600
 header_length = 24
 i2p_dest_obj_type = 0x493250
 i2p_dest_obj_version = 1
+onion_obj_type = 0x746f72
+onion_obj_version = 3
+
+socks_proxy = None
+tor = False
+onion_hostname = ''

 i2p_enabled = False
 i2p_transient = False
@@ -54,13 +60,16 @@ core_nodes = set()

 node_pool = set()
 unchecked_node_pool = set()
+nonce_pool = {}

 i2p_core_nodes = set()
 i2p_node_pool = set()
 i2p_unchecked_node_pool = set()

+onion_pool = set()
+onion_unchecked_pool = set()
+
 outgoing_connections = 8
 connection_limit = 250

 objects = {}
-objects_lock = threading.Lock()
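Incidentally, both object-type constants are ASCII tags packed into integers, which makes them easy to spot in a hex dump:

bytes.fromhex('493250')  # b'I2P' -> i2p_dest_obj_type = 0x493250
bytes.fromhex('746f72')  # b'tor' -> onion_obj_type = 0x746f72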
minode/sql.py (new file, 257 lines):
@ -0,0 +1,257 @@
|
||||||
|
"""Inventory implementation using sqlite"""
|
||||||
|
|
||||||
|
import base64
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sqlite3
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
from . import shared, structure
|
||||||
|
|
||||||
|
sqlite3.threadsafety = 3
|
||||||
|
|
||||||
|
|
||||||
|
class Inventory():
|
||||||
|
"""sqlite inventory"""
|
||||||
|
def __init__(self):
|
||||||
|
self._lock = threading.Lock()
|
||||||
|
self._deleted = 0
|
||||||
|
self._last = {}
|
||||||
|
self._pending = set()
|
||||||
|
self._db = sqlite3.connect(
|
||||||
|
os.path.join(shared.data_directory, 'objects.dat'),
|
||||||
|
check_same_thread=False
|
||||||
|
)
|
||||||
|
self._db.executescript("""
|
||||||
|
BEGIN;
|
||||||
|
CREATE TABLE IF NOT EXISTS status
|
||||||
|
(key text, value integer, UNIQUE(key) ON CONFLICT REPLACE);
|
||||||
|
INSERT INTO status VALUES ('version', 1);
|
||||||
|
CREATE TABLE IF NOT EXISTS objects
|
||||||
|
(vector unique, expires integer, type integer, version integer,
|
||||||
|
stream integer, tag, data, offset integer);
|
||||||
|
COMMIT;
|
||||||
|
""")
|
||||||
|
self.rowid = len(self) or None
|
||||||
|
try:
|
||||||
|
self.lastvacuumtime = self._db.execute(
|
||||||
|
"SELECT value FROM status WHERE key='lastvacuumtime'"
|
||||||
|
).fetchone()[0]
|
||||||
|
except TypeError:
|
||||||
|
self.lastvacuumtime = int(time.time())
|
||||||
|
self._db.execute(
|
||||||
|
"INSERT INTO status VALUES ('lastvacuumtime', ?)",
|
||||||
|
(self.lastvacuumtime,)
|
||||||
|
)
|
||||||
|
self._db.commit()
|
||||||
|
self._db.row_factory = self.__object
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def __object(cursor, row):
|
||||||
|
if len(cursor.description) != 8:
|
||||||
|
return row
|
||||||
|
vector, expires, obj_type, version, stream, tag, data, offset = row
|
||||||
|
return structure.Object(
|
||||||
|
expires, obj_type, version, stream,
|
||||||
|
data=data, offset=offset, tag=tag, vector=vector)
|
||||||
|
|
||||||
|
def check(self, *vectors):
|
||||||
|
"""Remove given vectors from pending"""
|
||||||
|
with self._lock:
|
||||||
|
for vector in vectors:
|
||||||
|
self._pending.discard(vector)
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
"""Remove expired objects"""
|
||||||
|
if len(self._pending) > 100:
|
||||||
|
logging.info(
|
||||||
|
'Not cleaning up, %s objects pending', len(self._pending))
|
||||||
|
return
|
||||||
|
for vector in set(self._last):
|
||||||
|
if self._last[vector].is_expired():
|
||||||
|
logging.debug(
|
||||||
|
'Deleted expired object: %s',
|
||||||
|
base64.b16encode(vector).decode())
|
||||||
|
with self._lock:
|
||||||
|
del self._last[vector]
|
||||||
|
if len(self._last) > 1000:
|
||||||
|
self.flush()
|
||||||
|
return
|
||||||
|
with self._lock:
|
||||||
|
now = int(time.time())
|
||||||
|
cur = self._db.execute(
|
||||||
|
'DELETE FROM objects WHERE expires < ?', (now - 3 * 3600,))
|
||||||
|
self._db.commit()
|
||||||
|
self._deleted += cur.rowcount
|
||||||
|
(logging.info if self._pending else logging.debug)(
|
||||||
|
'Deleted %s expired objects, %s pending',
|
||||||
|
cur.rowcount, len(self._pending))
|
||||||
|
# conditional vacuum and validity check (TODO)
|
||||||
|
# every 24 hours or after deleting a lot of items
|
||||||
|
if self._deleted > 10000 or self.lastvacuumtime < now - 86400:
|
||||||
|
logging.info('Doing VACUUM for objects')
|
||||||
|
cur.execute('VACUUM')
|
||||||
|
cur.execute(
|
||||||
|
"INSERT INTO status VALUES ('lastvacuumtime', ?)", (now,))
|
||||||
|
self._db.commit()
|
||||||
|
self._deleted = 0
|
||||||
|
self.lastvacuumtime = now
|
||||||
|
|
||||||
|
def flush(self):
|
||||||
|
"""Write cached objects to the database"""
|
||||||
|
with self._lock:
|
||||||
|
cur = self._db.executemany(
|
||||||
|
'INSERT INTO objects VALUES (?,?,?,?,?,?,?,?)',
|
||||||
|
((obj.vector, obj.expires_time, obj.object_type,
|
||||||
|
obj.version, obj.stream_number, obj.tag, obj.data,
|
||||||
|
obj.offset) for obj in self._last.values()))
|
||||||
|
self._db.commit()
|
||||||
|
self.rowid = cur.lastrowid
|
||||||
|
self._last = {}
|
||||||
|
|
||||||
|
def filter(self, stream=None, object_type=None, tag=None):
|
||||||
|
"""Generator of objects with the given parameters"""
|
||||||
|
def fits(obj):
|
||||||
|
if stream and obj.stream_number != stream:
|
||||||
|
return False
|
||||||
|
if object_type is not None and obj.object_type != object_type:
|
||||||
|
return False
|
||||||
|
if tag and obj.tag != tag:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
yield from filter(fits, self._last.values())
|
||||||
|
|
||||||
|
clauses = []
|
||||||
|
if stream:
|
||||||
|
clauses.append(('stream = ?', stream))
|
||||||
|
        if object_type is not None:
            clauses.append(('type = ?', object_type))
        if tag:
            clauses.append(('tag = ?', tag))

        clauses, params = zip(*clauses)

        yield from self._db.execute(
            'SELECT * FROM objects WHERE '  # nosec B608
            + ' AND '.join(clauses), params)

    def select(self, vectors):
        """Select new vectors from the given set"""
        chunk_size = 999
        with self._lock:
            vectors.difference_update(self._last)
            keys = tuple(vectors)
            for i in range(0, len(vectors), chunk_size):
                chunk = keys[i:i+chunk_size]
                cur = self._db.execute(
                    'SELECT vector FROM objects WHERE vector IN'  # nosec B608
                    ' ({})'.format(','.join('?' * len(chunk))),
                    chunk)
                for v, in cur:
                    vectors.remove(v)
            self._pending.update(vectors)
        return vectors

    def biginv_chunks(self, chunk_size=10000, stream=None):
        """Generator of vector lists for making the biginv"""
        if stream is None:
            stream = shared.stream
        now = int(time.time())
        cur = self._db.execute(
            'SELECT vector FROM objects WHERE expires > ? AND stream = ?'
            ' ORDER BY random()', (now, stream)
        )
        cur.arraysize = chunk_size
        while True:
            vectors = cur.fetchmany()
            if not vectors:
                # TODO: append to the last short result,
                # check that _last is shorter than the chunk_size
                # (should be < 1000)
                if self._last:
                    yield [
                        obj.vector for obj in self._last.values()
                        if obj.stream_number == stream
                        and obj.expires_time > now]
                return
            yield [v for v, in vectors]

    def get(self, vector, default=None):
        try:
            return self[vector]
        except KeyError:
            return default

    def keys(self):
        yield from self._last
        for vector, in self._db.execute('SELECT vector FROM objects'):
            yield vector

    def values(self):
        yield from self._last.values()
        yield from self._db.execute('SELECT * FROM objects')

    def popitem(self):
        try:
            return self._last.popitem()
        except KeyError:
            pass
        if not self.rowid:
            raise KeyError('empty')
        cur = self._db.execute(
            'SELECT vector FROM objects WHERE ROWID = ?', (self.rowid,))
        vector = cur.fetchone()[0]
        obj = self.get(vector)
        del self[vector]
        return (vector, obj)

    def __contains__(self, vector):
        if vector in self._last:
            return True
        return self._db.execute(
            'SELECT vector FROM objects WHERE vector = ?', (vector,)
        ).fetchone() is not None

    def __getitem__(self, vector):
        try:
            return self._last[vector]
        except KeyError:
            pass
        item = self._db.execute(
            'SELECT * FROM objects WHERE vector = ?', (vector,)).fetchone()
        if item is None:
            raise KeyError(vector)
        return item

    def __delitem__(self, vector):
        try:
            del self._last[vector]
            return
        except KeyError:
            pass
        with self._lock:  # KeyError
            self._db.execute('DELETE FROM objects WHERE vector = ?', (vector,))
            self._db.commit()
            self.rowid = len(self)

    def __setitem__(self, vector, obj):
        if vector in self:
            return
        with self._lock:
            self._last[vector] = obj

    def __bool__(self):
        if self._last:
            return True
        return self._db.execute(
            'SELECT vector from objects LIMIT 1').fetchone() is not None

    def __len__(self):
        cur = self._db.execute('SELECT count(*) FROM objects')
        return cur.fetchone()[0] + len(self._last)

    def __del__(self):
        self.flush()
        self._db.close()
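The methods above complete the sqlite-backed inventory (the `Inventory` class the tests below import from `minode.sql`). It implements the mapping protocol, so calling code can keep treating `shared.objects` as the dict it replaces. A minimal usage sketch, assuming `shared.data_directory` points somewhere writable before `Inventory()` is constructed:

```
import time

from minode import shared, sql, structure

shared.objects = sql.Inventory()  # drop-in replacement for the old dict

obj = structure.Object(
    int(time.time()) + 3600, 42, 1, 1, object_payload=b'HELLO')
shared.objects[obj.vector] = obj       # __setitem__; a no-op for duplicates
assert obj.vector in shared.objects    # __contains__: cache first, then db
shared.objects.flush()                 # push the write-cache into sqlite
for stored in shared.objects.filter(1, 42):  # by stream, then object type
    print(stored.vector.hex())
```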
minode/structure.py
@@ -1,8 +1,10 @@
 # -*- coding: utf-8 -*-
 """Protocol structures"""
 import base64
+import binascii
 import hashlib
 import logging
+import re
 import socket
 import struct
 import time

@@ -62,21 +64,26 @@ class VarInt(IStructure):
 class Object():
     """The 'object' message payload"""
     def __init__(
-            self, nonce, expires_time, object_type, version,
-            stream_number, object_payload
+            self, expires_time, object_type, version, stream_number,
+            *, data=None, offset=None, object_payload=None,
+            tag=None, nonce=b'\x00' * 8, vector=None
     ):
         self.nonce = nonce
         self.expires_time = expires_time
         self.object_type = object_type
         self.version = version
         self.stream_number = stream_number
-        self.object_payload = object_payload
-        self.vector = hashlib.sha512(hashlib.sha512(
-            self.to_bytes()).digest()).digest()[:32]
+        if not data:
+            data, offset = self.to_bytes(object_payload)
+        self.data = data
+        self.offset = offset
+        self.vector = vector or hashlib.sha512(hashlib.sha512(
+            self.data).digest()).digest()[:32]

-        self.tag = (
+        self.tag = tag or (
             # broadcast from version 5 and pubkey/getpubkey from version 4
-            self.object_payload[:32] if object_type == 3 and version == 5
+            (object_payload or self.object_payload)[:32]
+            if object_type == 3 and version == 5
             or (object_type in (0, 1) and version == 4)
             else None)

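After this change the positional arguments are the protocol fields only, and everything else (including the nonce) is keyword-only. Two construction paths follow from the `__init__` above; a sketch:

```
import time

from minode import structure

# Path 1: build from fields; the constructor serializes and hashes,
# nonce defaults to b'\x00' * 8 until PoW fills it in
obj = structure.Object(
    int(time.time()) + 3600, 42, 1, 2, object_payload=b'HELLO')

# Path 2: wrap already-serialized bytes (as from_message() does);
# nothing is re-encoded, the vector is computed over `data` as-is
clone = structure.Object(
    obj.expires_time, obj.object_type, obj.version, obj.stream_number,
    data=obj.data, offset=obj.offset, nonce=obj.nonce)
assert clone.vector == obj.vector
assert clone.object_payload == b'HELLO'
```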
@@ -87,29 +94,33 @@ class Object():
     @classmethod
     def from_message(cls, m):
         """Decode message payload"""
-        payload = m.payload
-        nonce, expires_time, object_type = struct.unpack('>8sQL', payload[:20])
-        payload = payload[20:]
-        version_varint_length = VarInt.length(payload[0])
-        version = VarInt.from_bytes(payload[:version_varint_length]).n
-        payload = payload[version_varint_length:]
-        stream_number_varint_length = VarInt.length(payload[0])
+        data = m.payload
+        nonce, expires_time, object_type = struct.unpack('>8sQL', data[:20])
+        version_varint_length = VarInt.length(data[20])
+        offset = 20 + version_varint_length
+        version = VarInt.from_bytes(data[20:offset]).n
+        stream_number_varint_length = VarInt.length(data[offset])
         stream_number = VarInt.from_bytes(
-            payload[:stream_number_varint_length]).n
-        payload = payload[stream_number_varint_length:]
+            data[offset:offset+stream_number_varint_length]).n
+        offset += stream_number_varint_length
         return cls(
-            nonce, expires_time, object_type, version, stream_number, payload)
+            expires_time, object_type, version, stream_number,
+            data=data, offset=offset, nonce=nonce
+        )

-    def to_bytes(self):
-        """Serialize to bytes object payload"""
+    @property
+    def object_payload(self):
+        return self.data[self.offset:]
+
+    def to_bytes(self, object_payload):
+        """Serialize to bytes"""
         payload = b''
         payload += self.nonce
         payload += struct.pack('>QL', self.expires_time, self.object_type)
         payload += (
             VarInt(self.version).to_bytes()
             + VarInt(self.stream_number).to_bytes())
-        payload += self.object_payload
-        return payload
+        return payload + object_payload, len(payload)

     def is_expired(self):
         """Check if object's TTL is expired"""
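The rewritten `from_message` no longer slices `payload` repeatedly; it keeps one immutable `data` buffer and advances an `offset` over the 20-byte fixed header (8-byte nonce, 8-byte expiry, 4-byte type) and the two varints. A standalone sketch of the same walk over the sample object bytes used in the tests below (both varints are single bytes there, hence `offset == 22`):

```
import struct

data = bytes.fromhex(
    '000000000000000000000000652724030000002a010248454c4c4f')
nonce, expires_time, object_type = struct.unpack('>8sQL', data[:20])
version, stream_number = data[20], data[21]   # single-byte varints here
offset = 22                                   # payload starts after them
assert (object_type, version, stream_number) == (42, 1, 2)
assert data[offset:] == b'HELLO'
assert expires_time == 1697063939
```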
@@ -152,7 +163,7 @@ class Object():

     def pow_target(self):
         """Compute PoW target"""
-        data = self.to_bytes()[8:]
+        data = self.data[8:]
         length = len(data) + 8 + shared.payload_length_extra_bytes
         dt = max(self.expires_time - time.time(), 0)
         return int(

@@ -162,7 +173,7 @@ class Object():

     def pow_initial_hash(self):
         """Compute the initial hash for PoW"""
-        return hashlib.sha512(self.to_bytes()[8:]).digest()
+        return hashlib.sha512(self.data[8:]).digest()


 class NetAddrNoPrefix(IStructure):

@@ -227,7 +238,7 @@ class NetAddr(IStructure):

     def to_bytes(self):
         b = b''
-        b += struct.pack('>Q', int(time.time()))
+        b += struct.pack('>Q', int(time.time() - 5400))
         b += struct.pack('>I', self.stream)
         b += NetAddrNoPrefix(self.services, self.host, self.port).to_bytes()
         return b

@@ -237,3 +248,42 @@ class NetAddr(IStructure):
         stream, net_addr = struct.unpack('>QI26s', b)[1:]
         n = NetAddrNoPrefix.from_bytes(net_addr)
         return cls(n.services, n.host, n.port, stream)
+
+
+class OnionPeer():
+    """An object containing onion peer info"""
+    def __init__(self, host, port=8444, stream=None, dest_pub=None):
+        self.stream = stream or shared.stream
+        self.host = host
+        self.port = port
+        try:
+            self.dest_pub = dest_pub or base64.b32decode(
+                re.search(r'(.*)\.onion', host).groups()[0], True)
+        except (AttributeError, binascii.Error) as e:
+            raise ValueError('Malformed hostname') from e
+
+    def __repr__(self):
+        return 'onion_peer, stream: {}, host: {}, port {}'.format(
+            self.stream, self.host, self.port)
+
+    def to_object(self):
+        """Encode the onion peer to the `Object`"""
+        payload = b''
+        payload += VarInt(self.port).to_bytes()
+        payload += b'\xfd\x87\xd8\x7e\xeb\x43'
+        payload += self.dest_pub
+        return Object(
+            int(time.time() + 8 * 3600), shared.onion_obj_type,
+            shared.onion_obj_version, self.stream, object_payload=payload)
+
+    @classmethod
+    def from_object(cls, obj):
+        """Decode the onion peer from an `Object` instance"""
+        payload = obj.object_payload
+        port_length = VarInt.length(payload[0])
+        port = VarInt.from_bytes(payload[:port_length]).n
+        if payload[port_length:port_length + 6] != b'\xfd\x87\xd8\x7e\xeb\x43':
+            raise ValueError('Malformed onion peer object')
+        dest_pub = payload[port_length + 6:]
+        host = base64.b32encode(dest_pub).lower().decode() + '.onion'
+        return cls(host, port, obj.stream_number, dest_pub)
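The onion-peer object payload is simply a varint port, a fixed 6-byte marker, and the raw service public key (35 bytes for a v3 onion address, so its base32 form is the familiar 56-character hostname). A round-trip sketch, assuming `shared.onion_obj_type` and `shared.onion_obj_version` are defined as elsewhere in this branch:

```
from minode import structure

host = 'bmtestlmgmvpbsg7kzmrxu47chs3cdou2tj4t5iloocgujzsf3e7rbqd.onion'
peer = structure.OnionPeer(host, port=8444, stream=1)

obj = peer.to_object()  # payload: varint(port) + 6-byte marker + dest_pub
decoded = structure.OnionPeer.from_object(obj)
assert decoded.port == peer.port
assert decoded.dest_pub == peer.dest_pub
assert decoded.host == host  # 56-char base32 of the 35-byte key
```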
minode/tests/common.py (new file)
@@ -0,0 +1,9 @@
"""Common expressions for the tests"""

import socket

try:
    socket.socket().bind(('127.0.0.1', 9050))
    tor_port_free = True
except (OSError, socket.error):
    tor_port_free = False
@@ -8,7 +8,7 @@ import tempfile
 import time
 from contextlib import contextmanager

-from minode import connection, main, shared
+from minode import connection, main, shared, sql
 from minode.listener import Listener
 from minode.manager import Manager

@@ -66,11 +66,14 @@ class TestNetwork(unittest.TestCase):
     def setUp(self):
         shared.core_nodes.clear()
         shared.unchecked_node_pool.clear()
-        shared.objects = {}
         try:
-            os.remove(os.path.join(shared.data_directory, 'objects.pickle'))
+            os.remove(os.path.join(shared.data_directory, 'objects.dat'))
         except FileNotFoundError:
             pass
+        shared.objects = sql.Inventory()
+
+    def tearDown(self):
+        shared.objects.flush()

     def _make_initial_nodes(self):
         Manager.load_data()
minode/tests/test_objects.py (new file)
@@ -0,0 +1,165 @@
"""Tests for the Inventory implementation"""
import os
import random
import tempfile
import time
import unittest

from minode import sql, shared, structure


# + __bool__
# + __contains__
# + __getitem__
# + __setitem__
# = cleanup
# + get
# + filter
# = select
# + biginv_chunks


class TestObjects():
    """
    A base class for the test case for shared.objects,
    containing tests for all the methods directly used in code.
    """
    # pylint: disable=no-member
    # A possibility of abstract test cases was rejected:
    # https://bugs.python.org/issue17519

    def test_set_get(self):
        """Put some objects and check presence and getting"""
        obj = structure.Object(
            int(time.time()), 42, 1, 1, object_payload=b'HELLO')
        self.assertFalse(obj.vector in self.objects)
        with self.assertRaises(KeyError):
            self.objects[obj.vector]  # pylint: disable=pointless-statement
        self.assertIsNone(self.objects.get(obj.vector))
        prev_len = len(self.objects)
        self.objects[obj.vector] = obj
        self.objects[obj.vector] = obj
        self.assertTrue(self.objects)
        self.assertEqual(len(self.objects), prev_len + 1)
        self.assertTrue(obj.vector in self.objects)
        obj1 = self.objects[obj.vector]
        self.assertEqual(obj.vector, obj1.vector)
        self.assertEqual(obj.data, obj1.data)

    def test_biginv_chunks(self):
        """Check the biginv_chunks method"""
        needed = set()
        for _ in range(10):
            # wrong stream
            obj = structure.Object(
                int(time.time()) + 10, 42, 1, random.randint(1, 3),
                object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
            # expired
            obj = structure.Object(
                int(time.time()) - 10, 42, 1, 4, object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
            # interesting
            obj = structure.Object(
                int(time.time()) + 10, 42, 1, 4, object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
            needed.add(obj.vector)

        self.assertEqual(
            set(next(self.objects.biginv_chunks(stream=4))), needed)
        self.assertTrue(
            set(next(self.objects.biginv_chunks())).difference(needed))

    def test_filter(self):
        """Check the objects filtering"""
        needed = set()
        tagged = set()
        tag = b'@' * 32
        for _ in range(10):
            # wrong type
            obj = structure.Object(
                int(time.time()), 0, 1, 5, object_payload=os.urandom(64))
            self.objects[obj.vector] = obj
            # wrong type, but the proper tag
            obj = structure.Object(
                int(time.time()) - 11000, 0, 4, random.choice([1, 2, 3, 5]),
                object_payload=tag + os.urandom(32))
            self.objects[obj.vector] = obj
            tagged.add(obj.vector)
            # wrong stream
            obj = structure.Object(
                int(time.time()), 33, 1, 1, object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
            # interesting
            obj = structure.Object(
                int(time.time()) - 11000, 33, 1, 5,
                object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
            needed.add(obj.vector)

        # stream and type
        self.assertTrue(needed)
        for obj in self.objects.filter(5, 33):
            needed.remove(obj.vector)
        self.assertFalse(needed)

        # tag
        self.assertTrue(tagged)
        for obj in self.objects.filter(tag=tag):
            tagged.remove(obj.vector)
        self.assertFalse(tagged)

    def test_cleanup(self):
        """Check cleaning up"""
        for _ in range(10):
            obj = structure.Object(
                int(time.time()) - random.randint(4, 5) * 3600,
                42, 1, 6, object_payload=os.urandom(32))
            self.objects[obj.vector] = obj
        obj = structure.Object(
            int(time.time()) - 2 * 3600,
            42, 1, 6, object_payload=os.urandom(32))
        self.objects[obj.vector] = obj

        for obj in self.objects.values():
            if obj.is_expired():
                break
        else:
            self.fail('No objects found to delete')

        self.objects.cleanup()
        self.assertTrue(self.objects)
        for obj in self.objects.values():
            self.assertFalse(obj.is_expired())

    def test_select(self):
        """Check the select method"""
        pending = set()
        questionable = set()

        for _ in range(5):
            obj = structure.Object(
                int(time.time()) - 10, 42, 1, 7, object_payload=os.urandom(32))
            questionable.add(obj.vector)
            self.objects[obj.vector] = obj
            obj = structure.Object(
                int(time.time()) + 10, 42, 1, 7, object_payload=os.urandom(32))
            questionable.add(obj.vector)
            pending.add(obj.vector)

        self.assertEqual(self.objects.select(questionable), pending)


class TestObjectsSQL(TestObjects, unittest.TestCase):
    """A test case for the sqlite inventory"""

    @classmethod
    def setUpClass(cls):
        shared.data_directory = tempfile.gettempdir()
        cls.tearDownClass()
        cls.objects = sql.Inventory()

    @classmethod
    def tearDownClass(cls):
        cls.objects = None
        os.remove(os.path.join(shared.data_directory, 'objects.dat'))
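`TestObjects` deliberately does not inherit `unittest.TestCase`, so its shared tests are only collected through concrete subclasses such as `TestObjectsSQL`. Assuming the package is importable from the checkout, the suite runs as usual:

```
python -m unittest -v minode.tests.test_objects
```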
@@ -13,6 +13,8 @@ import psutil
 from minode.i2p import util
 from minode.structure import NetAddrNoPrefix

+from .common import tor_port_free
+
 try:
     socket.socket().bind(('127.0.0.1', 7656))
     i2p_port_free = True

@@ -185,3 +187,21 @@ class TestProcessI2P(TestProcess):
 class TestProcessNoI2P(TestProcessShutdown):
     """Test minode process shutdown with --i2p and no IP"""
     _process_cmd = ['minode', '--i2p', '--no-ip']
+
+
+@unittest.skipIf(tor_port_free, 'No running tor detected')
+class TestProcessTor(TestProcessProto):
+    """A test case for minode process running with tor enabled"""
+    _process_cmd = ['minode', '--tor']
+    _wait_time = 60
+
+    def test_connections(self):
+        """Check minode process connections"""
+        for _ in range(self._wait_time):
+            time.sleep(0.5)
+            connections = self.connections()
+            for c in connections:
+                self.assertEqual(c.raddr[0], '127.0.0.1')
+                self.assertEqual(c.raddr[1], 9050)
+            if len(connections) > self._connection_limit / 2:
+                break
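Asserting that every remote address is `127.0.0.1:9050` is what proves the process only dials out through the tor SOCKS port. The `connections()` helper lives on the base class and is not part of this hunk; a plausible psutil-based equivalent (hypothetical helper, for illustration only) looks like:

```
import psutil

def process_connections(pid):
    """Established TCP connections of the given process (hypothetical)."""
    return [
        c for c in psutil.Process(pid).connections(kind='tcp')
        if c.status == psutil.CONN_ESTABLISHED]
```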
minode/tests/test_proofofwork.py (new file)
@@ -0,0 +1,77 @@
"""Special tests for PoW"""
import base64
import logging
import multiprocessing
import os
import queue
import signal
import threading
import time
import unittest

from minode import proofofwork, shared, structure


logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] [%(levelname)s] %(message)s')

multiprocessing.set_start_method('spawn')


class TestProofofwork(unittest.TestCase):
    """Test components of proof of work"""

    def setUp(self):
        shared.objects = {}

    def test_proofofwork(self):
        """Check the main proofofwork call and worker"""
        shared.vector_advertise_queue = queue.Queue()
        obj = structure.Object(
            int(time.time() + 300), 42, 1,
            shared.stream, object_payload=b'HELLO')
        start_time = time.time()
        proofofwork.do_pow_and_publish(obj)
        try:
            vector = shared.vector_advertise_queue.get(timeout=300)
        except queue.Empty:
            self.fail("Couldn't make work in 300 sec")
        else:
            time.sleep(1)
            try:
                result = shared.objects[vector]
            except KeyError:
                self.fail(
                    "Couldn't find the object with vector %s"
                    " %s sec after pow start" % (
                        base64.b16encode(vector), time.time() - start_time))
        self.assertTrue(result.is_valid())
        self.assertEqual(result.object_type, 42)
        self.assertEqual(result.object_payload, b'HELLO')

        q = queue.Queue()
        # pylint: disable=protected-access
        proofofwork._pow_worker(obj.pow_target(), obj.pow_initial_hash(), q)
        try:
            nonce = q.get(timeout=5)
        except queue.Empty:
            self.fail("No nonce found in the queue")

        obj = structure.Object(
            obj.expires_time, obj.object_type, obj.version, obj.stream_number,
            object_payload=obj.object_payload, nonce=nonce)
        self.assertTrue(obj.is_valid())

    def test_interrupt(self):
        """Send signals to _pow_worker process"""
        obj = structure.Object(
            int(time.time() + 300), 42, 1, 2, object_payload=os.urandom(4096))
        # pylint: disable=protected-access
        threading.Thread(target=proofofwork._worker, args=(obj, )).start()
        time.sleep(1)
        worker = multiprocessing.active_children()[0]
        # worker.terminate()
        os.kill(worker.pid, signal.SIGINT)
        time.sleep(1)
        self.assertFalse(worker.is_alive())
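For reference, the validity check these tests exercise follows the Bitmessage PoW scheme implied by `pow_initial_hash()` earlier in this diff: the first 8 bytes of `data` are the nonce, and the double SHA-512 of `nonce + initial_hash` must stay at or below the target. A self-contained sketch of the verification side (the real `is_valid()` presumably also checks TTL, stream and length):

```
import hashlib
import struct

def pow_is_sufficient(data, target):
    """Bitmessage-style PoW check over serialized object bytes (sketch)."""
    nonce, rest = data[:8], data[8:]
    initial_hash = hashlib.sha512(rest).digest()
    trial = struct.unpack('>Q', hashlib.sha512(hashlib.sha512(
        nonce + initial_hash).digest()).digest()[:8])[0]
    return trial <= target
```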
minode/tests/test_structure.py
@@ -1,7 +1,5 @@
 """Tests for structures"""
-import base64
 import logging
-import queue
 import struct
 import time
 import unittest

@@ -18,11 +16,15 @@ sample_addr_data = unhexlify(
     '260753000201300000000000000057ae1f90')

 # data for an object with expires_time 1697063939
-# structure.Object(
-#     b'\x00' * 8, expires_time, 42, 1, 2, b'HELLO').to_bytes()
+# structure.Object(expires_time, 42, 1, 2, object_payload=b'HELLO').data
 sample_object_data = unhexlify(
     '000000000000000000000000652724030000002a010248454c4c4f')

+sample_object_expires = 1697063939
+
+sample_onion_host = \
+    'bmtestlmgmvpbsg7kzmrxu47chs3cdou2tj4t5iloocgujzsf3e7rbqd.onion'
+
 logging.basicConfig(
     level=shared.log_level,
     format='[%(asctime)s] [%(levelname)s] %(message)s')

@@ -131,17 +133,24 @@ class TestStructure(unittest.TestCase):
             message.Message(b'object', sample_object_data))
         self.assertEqual(obj.object_type, 42)
         self.assertEqual(obj.stream_number, 2)
-        self.assertEqual(obj.expires_time, 1697063939)
+        self.assertEqual(obj.expires_time, sample_object_expires)
         self.assertEqual(obj.object_payload, b'HELLO')

         obj = structure.Object(
-            b'\x00' * 8, int(time.time() + 3000000), 42, 1, 1, b'HELLO')
+            sample_object_expires, 42, 1, 2, object_payload=b'HELLO')
+        self.assertEqual(obj.data, sample_object_data)
+        self.assertEqual(obj.offset, 22)
+        self.assertEqual(obj.nonce, b'\x00' * 8)
+        self.assertTrue(obj.is_expired())
+
+        obj = structure.Object(
+            int(time.time() + 3000000), 42, 1, 1, object_payload=b'HELLO')
         self.assertFalse(obj.is_valid())
         obj.expires_time = int(time.time() - 11000)
         self.assertFalse(obj.is_valid())

         obj = structure.Object(
-            b'\x00' * 8, int(time.time() + 300), 42, 1, 2, b'HELLO')
+            int(time.time() + 300), 42, 1, 2, object_payload=b'HELLO')
         vector = obj.vector
         proofofwork._worker(obj)  # pylint: disable=protected-access
         obj = shared.objects.popitem()[1]

@@ -151,44 +160,25 @@ class TestStructure(unittest.TestCase):
         shared.stream = 2
         self.assertTrue(obj.is_valid())

-        obj.object_payload = \
-            b'TIGER, tiger, burning bright. In the forests of the night'
-        self.assertFalse(obj.is_valid())
-
-    def test_proofofwork(self):
-        """Check the main proofofwork call and worker"""
-        shared.vector_advertise_queue = queue.Queue()
-        obj = structure.Object(
-            b'\x00' * 8, int(time.time() + 300), 42, 1,
-            shared.stream, b'HELLO')
-        start_time = time.time()
-        proofofwork.do_pow_and_publish(obj)
-        try:
-            vector = shared.vector_advertise_queue.get(timeout=300)
-        except queue.Empty:
-            self.fail("Couldn't make work in 300 sec")
-        else:
-            time.sleep(1)
-            try:
-                result = shared.objects[vector]
-            except KeyError:
-                self.fail(
-                    "Couldn't found object with vector %s"
-                    " %s sec after pow start" % (
-                        base64.b16encode(vector), time.time() - start_time))
-        self.assertTrue(result.is_valid())
-        self.assertEqual(result.object_type, 42)
-        self.assertEqual(result.object_payload, b'HELLO')
-
-        q = queue.Queue()
-        # pylint: disable=protected-access
-        proofofwork._pow_worker(obj.pow_target(), obj.pow_initial_hash(), q)
-        try:
-            nonce = q.get(timeout=5)
-        except queue.Empty:
-            self.fail("No nonce found in the queue")
-
-        obj = structure.Object(
-            nonce, obj.expires_time, obj.object_type, obj.version,
-            obj.stream_number, obj.object_payload)
-        self.assertTrue(obj.is_valid())
+        # obj.data = struct.pack(...
+        # obj.object_payload = \
+        #     b'TIGER, tiger, burning bright. In the forests of the night'
+        # self.assertFalse(obj.is_valid())
+
+    def test_onion_peer(self):
+        """Make an onion peer object and decode it back"""
+        with self.assertRaises(ValueError):
+            onion_peer = structure.OnionPeer('testing2')
+        with self.assertRaises(ValueError):
+            onion_peer = structure.OnionPeer('testing.onion')
+        onion_peer = structure.OnionPeer(sample_onion_host)
+        self.assertEqual(onion_peer.stream, shared.stream)
+        obj = onion_peer.to_object()
+        self.assertEqual(obj.object_type, shared.onion_obj_type)
+        self.assertEqual(obj.version, shared.onion_obj_version)
+        decoded = structure.OnionPeer.from_object(obj)
+        self.assertEqual(decoded.dest_pub, onion_peer.dest_pub)
+        self.assertEqual(decoded.port, onion_peer.port)
+        obj.object_payload = obj.object_payload[0:1] + obj.object_payload[2:]
+        with self.assertRaises(ValueError):
+            structure.OnionPeer.from_object(obj)
minode/tests/test_tor.py (new file)
@@ -0,0 +1,64 @@
"""Tests for the tor module"""
import collections
import os
import tempfile
import unittest

from minode import shared

from .common import tor_port_free

try:
    from minode import tor
except ImportError:
    tor = None


Proxy = collections.namedtuple('Proxy', ['hostname', 'port'])


@unittest.skipIf(
    tor_port_free or tor is None, 'Inappropriate environment for tor service')
class TestTor(unittest.TestCase):
    """A test case running the tor service"""
    tor = None
    _files = ['onion_dest_priv.key', 'onion_dest.pub']

    @classmethod
    def cleanup(cls):
        """Remove used files"""
        for f in cls._files:
            try:
                os.remove(os.path.join(shared.data_directory, f))
            except FileNotFoundError:
                pass

    @classmethod
    def setUpClass(cls):
        shared.data_directory = tempfile.gettempdir()
        shared.socks_proxy = Proxy('127.0.0.1', 9050)
        cls.cleanup()

    @classmethod
    def tearDownClass(cls):
        if cls.tor:
            cls.tor.close()
        cls.cleanup()

    def test_tor(self):
        """Start the tor service as in main and check the environment"""
        self.tor = tor.start_tor_service()
        if not self.tor:
            self.fail('The tor service has not started.')

        with open(
            os.path.join(shared.data_directory, 'onion_dest.pub'),
            'r', encoding='ascii'
        ) as key_file:
            onion_key = key_file.read()
        self.assertEqual(onion_key + '.onion', shared.onion_hostname)

        # with open(
        #     os.path.join(shared.data_directory, 'onion_dest_priv.key'), 'rb'
        # ) as key_file:
        #     private_key = key_file.read()
minode/tor.py (new file)
@@ -0,0 +1,147 @@
"""Tor specific procedures"""
import logging
import os
import stat
import random
import tempfile

import stem
import stem.control
import stem.process
import stem.util
import stem.version

from . import shared


def logwrite(line):
    """A simple log writing handler for tor messages"""
    try:
        level, line = line.split('[', 1)[1].split(']', 1)
    except (IndexError, ValueError):
        logging.warning(line)
    else:
        if level in ('err', 'warn'):
            logging.info('(tor)%s', line)


def start_tor_service():
    """Start own tor instance and configure a hidden service"""
    try:
        socket_dir = os.path.join(shared.data_directory, 'tor')
        os.makedirs(socket_dir, exist_ok=True)
    except OSError:
        try:
            socket_dir = tempfile.mkdtemp()
        except OSError:
            logging.info('Failed to create a temp dir.')
            return

    if os.getuid() == 0:
        logging.info('Tor is not going to start as root')
        return

    try:
        present_permissions = os.stat(socket_dir)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        allowed_permissions = ((1 << 32) - 1) ^ disallowed_permissions
        os.chmod(socket_dir, allowed_permissions & present_permissions)
    except OSError:
        logging.debug('Failed to set dir permissions.')
        return

    stem.util.log.get_logger().setLevel(logging.WARNING)

    control_socket = os.path.abspath(os.path.join(socket_dir, 'tor_control'))
    port = str(shared.socks_proxy.port)
    tor_config = {
        'SocksPort': port,
        'ControlSocket': control_socket}

    for attempt in range(20):
        if attempt > 0:
            port = random.randint(32767, 65535)  # nosec B311
            tor_config['SocksPort'] = str(port)
        try:
            stem.process.launch_tor_with_config(
                tor_config, take_ownership=True, timeout=90,
                init_msg_handler=logwrite)
        except OSError:
            if not attempt:
                if not shared.listen_for_connections:
                    return
                try:
                    stem.version.get_system_tor_version()
                except IOError:
                    return
            continue
        else:
            logging.info('Started tor on port %s', port)
            break
    else:
        logging.debug('Failed to start tor.')
        return

    if not shared.listen_for_connections:
        return True

    try:
        controller = stem.control.Controller.from_socket_file(control_socket)
        controller.authenticate()
    except stem.SocketError:
        logging.debug('Failed to instantiate or authenticate on controller.')
        return

    onionkey = onionkeytype = None
    try:
        with open(
            os.path.join(shared.data_directory, 'onion_dest_priv.key'),
            'r', encoding='ascii'
        ) as src:
            onionkey = src.read()
        logging.debug('Loaded onion service private key.')
        onionkeytype = 'ED25519-V3'
    except FileNotFoundError:
        pass
    except Exception:
        logging.info(
            'Error while loading onion service private key.', exc_info=True)

    response = controller.create_ephemeral_hidden_service(
        shared.listening_port, key_type=onionkeytype or 'NEW',
        key_content=onionkey or 'BEST'
    )

    if not response.is_ok():
        logging.info('Bad response from controller ):')
        return

    shared.onion_hostname = '{}.onion'.format(response.service_id)
    logging.info('Started hidden service %s', shared.onion_hostname)

    if onionkey:
        return controller

    try:
        with open(
            os.path.join(shared.data_directory, 'onion_dest_priv.key'),
            'w', encoding='ascii'
        ) as src:
            src.write(response.private_key)
        logging.debug('Saved onion service private key.')
    except Exception:
        logging.warning(
            'Error while saving onion service private key.', exc_info=True)

    try:
        with open(
            os.path.join(shared.data_directory, 'onion_dest.pub'),
            'w', encoding='ascii'
        ) as src:
            src.write(response.service_id)
        logging.debug('Saved onion service public key.')
    except Exception:
        logging.warning(
            'Error while saving onion service public key.', exc_info=True)

    return controller
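As `test_tor.py` above demonstrates, `start_tor_service()` only needs `shared.data_directory` and `shared.socks_proxy` to be set; it returns a stem controller to keep around (or a bare `True` when not listening for connections). A sketch mirroring the test's setup:

```
import collections
import tempfile

from minode import shared, tor

Proxy = collections.namedtuple('Proxy', ['hostname', 'port'])
shared.data_directory = tempfile.gettempdir()
shared.socks_proxy = Proxy('127.0.0.1', 9050)

controller = tor.start_tor_service()
if not controller:
    raise RuntimeError('tor did not start')
# onion_hostname is set only when a hidden service was created
print('hidden service at', shared.onion_hostname)
controller.close()  # also tears down the ephemeral hidden service
```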
requirements.txt
@@ -1,2 +1,4 @@
 coverage
 psutil
+PySocks
+stem
setup.py
@@ -24,6 +24,7 @@ setup(
     packages=find_packages(exclude=('*tests',)),
     package_data={'': ['*.csv', 'tls/*.pem']},
     entry_points={'console_scripts': ['minode = minode.main:main']},
+    extras_require={'proxy': ['PySocks'], 'tor': ['PySocks', 'stem>1.8.0']},
     classifiers=[
         "License :: OSI Approved :: MIT License"
         "Operating System :: OS Independent",
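With the new extras, the optional dependencies install together with the package; for example, from the project root:

```
pip install .[proxy]  # PySocks, enough for a generic --socks-proxy
pip install .[tor]    # PySocks and stem>1.8.0, for the --tor option
```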
tox.ini
@@ -3,6 +3,8 @@ envlist = reset,py3{6,7,8,9,10,11},stats
 skip_missing_interpreters = true

 [testenv]
+setenv =
+    HOME = {envtmpdir}
 deps = -rrequirements.txt
 commands =
     coverage run -a -m tests