removed unusual files
parent a69f2a6bbe
commit 98720a4aa4
@@ -1,304 +0,0 @@
"""
|
||||
Bitmessage mock
|
||||
"""
|
||||
from pybitmessage.class_addressGenerator import addressGenerator
|
||||
from pybitmessage.class_singleWorker import singleWorker
|
||||
from pybitmessage.class_objectProcessor import objectProcessor
|
||||
from pybitmessage.inventory import Inventory
|
||||
from pybitmessage.bmconfigparser import BMConfigParser
|
||||
from pybitmessage.class_singleCleaner import singleCleaner
|
||||
from pybitmessage import state
|
||||
from pybitmessage.network.threads import StoppableThread
|
||||
|
||||
# from pybitmessage.network.connectionpool import BMConnectionPool
|
||||
# from pybitmessage.network.networkthread import BMNetworkThread
|
||||
# from pybitmessage.network.receivequeuethread import ReceiveQueueThread
|
||||
|
||||
# pylint: disable=too-few-public-methods,no-init,old-style-class
|
||||
class MockMain:
    """Mock main function"""

    # pylint: disable=no-self-use
    def start(self):
        """Start main application"""
        # pylint: disable=too-many-statements,too-many-branches,too-many-locals,unused-variable
        config = BMConfigParser()
        daemon = config.safeGetBoolean('bitmessagesettings', 'daemon')

        # Start the address generation thread
        addressGeneratorThread = addressGenerator()
        # close the main program even if there are threads left
        addressGeneratorThread.daemon = True
        addressGeneratorThread.start()

        # Start the thread that calculates POWs
        singleWorkerThread = singleWorker()
        # close the main program even if there are threads left
        singleWorkerThread.daemon = True
        singleWorkerThread.start()

        # Start the object processing thread
        objectProcessorThread = objectProcessor()
        # DON'T close the main program even if the thread remains.
        # This thread checks the shutdown variable after processing
        # each object.
        objectProcessorThread.daemon = False
        objectProcessorThread.start()

        Inventory()  # init

        # Start the cleanerThread
        singleCleanerThread = singleCleaner()
        # close the main program even if there are threads left
        singleCleanerThread.daemon = True
        singleCleanerThread.start()
        # Not needed if objproc disabled
        # if state.enableObjProc:
        #     shared.reloadMyAddressHashes()
        #     shared.reloadBroadcastSendersForWhichImWatching()

        #     # API is also objproc dependent
        #     if config.safeGetBoolean('bitmessagesettings', 'apienabled'):
        #         # pylint: disable=relative-import
        #         from pybitmessage import api
        #         singleAPIThread = api.singleAPI()
        #         # close the main program even if there are threads left
        #         singleAPIThread.daemon = True
        #         singleAPIThread.start()

        # # start network components if networking is enabled
        # if state.enableNetwork:
        #     # start_proxyconfig()
        #     # BMConnectionPool().connectToStream(1)
        #     asyncoreThread = BMNetworkThread()
        #     asyncoreThread.daemon = True
        #     asyncoreThread.start()

        #     for i in range(config.safeGet('threads', 'receive')):
        #         receiveQueueThread = ReceiveQueueThread(i)
        #         receiveQueueThread.daemon = True
        #         receiveQueueThread.start()
        #     announceThread = AnnounceThread()
        #     announceThread.daemon = True
        #     announceThread.start()
        #     state.invThread = InvThread()
        #     state.invThread.daemon = True
        #     state.invThread.start()
        #     state.addrThread = AddrThread()
        #     state.addrThread.daemon = True
        #     state.addrThread.start()
        #     state.downloadThread = DownloadThread()
        #     state.downloadThread.daemon = True
        #     state.downloadThread.start()
        #     state.uploadThread = UploadThread()
        #     state.uploadThread.daemon = True
        #     state.uploadThread.start()

        #     if config.safeGetBoolean('bitmessagesettings', 'upnp'):
        #         import upnp
        #         upnpThread = upnp.uPnPThread()
        #         upnpThread.start()
        # else:
        #     # Populate with hardcoded value (same as connectToStream above)
        #     state.streamsInWhichIAmParticipating.append(1)

        # if not daemon and state.enableGUI:
        #     if state.curses:
        #         if not depends.check_curses():
        #             sys.exit()
        #         print('Running with curses')
        #         import bitmessagecurses
        #         bitmessagecurses.runwrapper()

        # config.remove_option('bitmessagesettings', 'dontconnect')
        # pylint: disable=no-member,import-error,no-name-in-module,relative-import
        from pybitmessage.mpybit import NavigateApp
        state.kivyapp = NavigateApp()
        print('NavigateApp() ----------------------')
        state.kivyapp.run()
        print('state.kivyapp.run() ----------------------')
        # else:
        #     config.remove_option('bitmessagesettings', 'dontconnect')

        # if daemon:
        #     while state.shutdown == 0:
        #         time.sleep(1)
        #         if (
        #             state.testmode
        #             and time.time() - state.last_api_response >= 30
        #         ):
        #             self.stop()
        # elif not state.enableGUI:
        #     state.enableGUI = True
        #     # pylint: disable=relative-import
        #     from tests import core as test_core
        #     test_core_result = test_core.run()
        #     state.enableGUI = True
        #     self.stop()
        #     test_core.cleanup()
        #     sys.exit(
        #         'Core tests failed!'
        #         if test_core_result.errors or test_core_result.failures
        #         else 0
        #     )

    # @staticmethod
    # def daemonize():
    #     """Running as a daemon. Send signal in end."""
    #     grandfatherPid = os.getpid()
    #     parentPid = None
    #     try:
    #         if os.fork():
    #             # unlock
    #             state.thisapp.cleanup()
    #             # wait until grandchild ready
    #             while True:
    #                 time.sleep(1)
    #             os._exit(0)  # pylint: disable=protected-access
    #     except AttributeError:
    #         # fork not implemented
    #         pass
    #     else:
    #         parentPid = os.getpid()
    #         state.thisapp.lock()  # relock

    #     os.umask(0)
    #     try:
    #         os.setsid()
    #     except AttributeError:
    #         # setsid not implemented
    #         pass
    #     try:
    #         if os.fork():
    #             # unlock
    #             state.thisapp.cleanup()
    #             # wait until child ready
    #             while True:
    #                 time.sleep(1)
    #             os._exit(0)  # pylint: disable=protected-access
    #     except AttributeError:
    #         # fork not implemented
    #         pass
    #     else:
    #         state.thisapp.lock()  # relock
    #         state.thisapp.lockPid = None  # indicate we're the final child
    #     sys.stdout.flush()
    #     sys.stderr.flush()
    #     if not sys.platform.startswith('win'):
    #         si = file(os.devnull, 'r')
    #         so = file(os.devnull, 'a+')
    #         se = file(os.devnull, 'a+', 0)
    #         os.dup2(si.fileno(), sys.stdin.fileno())
    #         os.dup2(so.fileno(), sys.stdout.fileno())
    #         os.dup2(se.fileno(), sys.stderr.fileno())
    #     if parentPid:
    #         # signal ready
    #         os.kill(parentPid, signal.SIGTERM)
    #         os.kill(grandfatherPid, signal.SIGTERM)

    # @staticmethod
    # def setSignalHandler():
    #     """Setting the Signal Handler"""
    #     signal.signal(signal.SIGINT, signal_handler)
    #     signal.signal(signal.SIGTERM, signal_handler)
    #     # signal.signal(signal.SIGINT, signal.SIG_DFL)

    # @staticmethod
    # def usage():
    #     """Displaying the usages"""
    #     print('Usage: ' + sys.argv[0] + ' [OPTIONS]')
    #     print('''
    # Options:
    #   -h, --help      show this help message and exit
    #   -c, --curses    use curses (text mode) interface
    #   -d, --daemon    run in daemon (background) mode
    #   -t, --test      dryrun, make testing
    #
    # All parameters are optional.
    # ''')

    # @staticmethod
    # def stop():
    #     """Stop main application"""
    #     with printLock:
    #         print('Stopping Bitmessage Daemon.')
    #     shutdown.doCleanShutdown()

    # # .. todo:: nice function but no one is using this
    # @staticmethod
    # def getApiAddress():
    #     """This function returns API address and port"""
    #     if not BMConfigParser().safeGetBoolean(
    #             'bitmessagesettings', 'apienabled'):
    #         return None
    #     address = BMConfigParser().get('bitmessagesettings', 'apiinterface')
    #     port = BMConfigParser().getint('bitmessagesettings', 'apiport')
    #     return {'address': address, 'port': port}


# def start_proxyconfig():
#     """Check socksproxytype and start any proxy configuration plugin"""
#     if not get_plugin:
#         return
#     config = BMConfigParser()
#     proxy_type = config.safeGet('bitmessagesettings', 'socksproxytype')
#     if proxy_type and proxy_type not in ('none', 'SOCKS4a', 'SOCKS5'):
#         try:
#             proxyconfig_start = time.time()
#             if not get_plugin('proxyconfig', name=proxy_type)(config):
#                 raise TypeError()
#         except TypeError:
#             # cannot import shutdown here ):
#             logger.error(
#                 'Failed to run proxy config plugin %s',
#                 proxy_type, exc_info=True)
#             os._exit(0)  # pylint: disable=protected-access
#         else:
#             logger.info(
#                 'Started proxy config plugin %s in %s sec',
#                 proxy_type, time.time() - proxyconfig_start)
# class AnnounceThread(StoppableThread):
#     """A thread to manage regular announcing of this node"""
#     name = "Announcer"

#     def run(self):
#         lastSelfAnnounced = 0
#         while not self._stopped and state.shutdown == 0:
#             processed = 0
#             if lastSelfAnnounced < time.time() - UDPSocket.announceInterval:
#                 self.announceSelf()
#                 lastSelfAnnounced = time.time()
#             if processed == 0:
#                 self.stop.wait(10)

#     @staticmethod
#     def announceSelf():
#         """Announce our presence"""
#         for connection in BMConnectionPool().udpSockets.values():
#             if not connection.announcing:
#                 continue
#             for stream in state.streamsInWhichIAmParticipating:
#                 addr = (
#                     stream,
#                     # state.Peer('127.0.0.1', int(BMConfigParser().safeGet("bitmessagesettings", "port"))),
#                     # int(time.time()))
#                     # connection.append_write_buf(BMProto.assembleAddr([addr]))
#                     Peer(
#                         '127.0.0.1',
#                         BMConfigParser().safeGetInt(
#                             'bitmessagesettings', 'port')),
#                     time.time())
#                 connection.append_write_buf(assemble_addr([addr]))


def main():
    """Triggers main module"""
    mainprogram = MockMain()
    mainprogram.start()


if __name__ == "__main__":
    main()
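The mock's lifecycle hinges on Python's daemon-thread semantics: the address generator, PoW worker, and cleaner are daemon threads the interpreter may abandon at exit, while the non-daemon objectProcessor has to notice the shutdown flag and finish on its own. A minimal self-contained sketch of that stop-flag pattern, independent of PyBitmessage (all names here are illustrative):

import threading


class StoppableWorker(threading.Thread):
    """Tiny stand-in for the StoppableThread pattern used above."""

    def __init__(self, name):
        super().__init__(name=name)
        self._stop_event = threading.Event()

    def stopThread(self):
        self._stop_event.set()

    def run(self):
        # a real worker would drain a queue here; we just poll the flag
        while not self._stop_event.is_set():
            self._stop_event.wait(1)


worker = StoppableWorker('worker')
worker.daemon = True  # interpreter exit will not wait for this thread
worker.start()
worker.stopThread()
worker.join()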
@@ -2,13 +2,10 @@
 Bitmessage mock
 """
 from pybitmessage.class_addressGenerator import addressGenerator
-from pybitmessage.class_singleWorker import singleWorker
-from pybitmessage.class_objectProcessor import objectProcessor
 from pybitmessage.inventory import Inventory
 from pybitmessage.bmconfigparser import BMConfigParser
-from pybitmessage.class_singleCleaner import singleCleaner
 from pybitmessage import state
 from pybitmessage.network.threads import StoppableThread


 class MockMain:
     """Mock main function"""

@@ -25,27 +22,27 @@ class MockMain:
         addressGeneratorThread.daemon = True
         addressGeneratorThread.start()

-        # Start the thread that calculates POWs
-        singleWorkerThread = singleWorker()
-        # close the main program even if there are threads left
-        singleWorkerThread.daemon = True
-        singleWorkerThread.start()
+        # # Start the thread that calculates POWs
+        # singleWorkerThread = singleWorker()
+        # # close the main program even if there are threads left
+        # singleWorkerThread.daemon = True
+        # singleWorkerThread.start()

-        # Start the object processing thread
-        objectProcessorThread = objectProcessor()
-        # DON'T close the main program even if the thread remains.
-        # This thread checks the shutdown variable after processing
-        # each object.
-        objectProcessorThread.daemon = False
-        objectProcessorThread.start()
+        # # Start the object processing thread
+        # objectProcessorThread = objectProcessor()
+        # # DON'T close the main program even if the thread remains.
+        # # This thread checks the shutdown variable after processing
+        # # each object.
+        # objectProcessorThread.daemon = False
+        # objectProcessorThread.start()

         Inventory()  # init

-        # Start the cleanerThread
-        singleCleanerThread = singleCleaner()
-        # close the main program even if there are threads left
-        singleCleanerThread.daemon = True
-        singleCleanerThread.start()
+        # # Start the cleanerThread
+        # singleCleanerThread = singleCleaner()
+        # # close the main program even if there are threads left
+        # singleCleanerThread.daemon = True
+        # singleCleanerThread.start()

         from pybitmessage.mpybit import NavigateApp
         state.kivyapp = NavigateApp()
@@ -1,23 +1,4 @@
-# """Mock kivy app with mock threads."""
-
-# from pybitmessage import state
-# from pybitmessage.mpybit import NavigateApp
-# from pybitmessage.class_addressGenerator import addressGenerator
-
-
-# def main():
-#     """main method for starting threads"""
-#     # Start the address generation thread
-#     addressGeneratorThread = addressGenerator()
-#     # close the main program even if there are threads left
-#     addressGeneratorThread.daemon = True
-#     addressGeneratorThread.start()
-
-#     state.kivyapp = NavigateApp()
-#     state.kivyapp.run()
-
-# if __name__ == '__main__':
-#     main()
+"""Mock kivy app with mock threads."""
+
+
+"""This module is for thread start."""
@@ -1,1504 +0,0 @@
"""
|
||||
This is not what you run to run the Bitmessage API. Instead, enable the API
|
||||
( https://bitmessage.org/wiki/API ) and optionally enable daemon mode
|
||||
( https://bitmessage.org/wiki/Daemon ) then run bitmessagemain.py.
|
||||
"""
|
||||
# Copyright (c) 2012-2016 Jonathan Warren
|
||||
# Copyright (c) 2012-2020 The Bitmessage developers
|
||||
# pylint: disable=too-many-lines,no-self-use,unused-variable,unused-argument
|
||||
import base64
|
||||
import errno
|
||||
import hashlib
|
||||
import json
|
||||
import random # nosec
|
||||
import socket
|
||||
import subprocess
|
||||
import time
|
||||
from binascii import hexlify, unhexlify
|
||||
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler, \
|
||||
SimpleXMLRPCServer
|
||||
from struct import pack
|
||||
|
||||
import defaults
|
||||
import helper_inbox
|
||||
import helper_sent
|
||||
import network.stats
|
||||
import proofofwork
|
||||
import queues
|
||||
import shared
|
||||
import shutdown
|
||||
import state
|
||||
import threads
|
||||
from addresses import (
|
||||
addBMIfNotPresent,
|
||||
calculateInventoryHash,
|
||||
decodeAddress,
|
||||
decodeVarint,
|
||||
varintDecodeError
|
||||
)
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_ackPayload import genAckPayload
|
||||
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery, sqlStoredProcedure
|
||||
from inventory import Inventory
|
||||
from network.threads import StoppableThread
|
||||
from version import softwareVersion
|
||||
|
||||
str_chan = '[chan]'
|
||||
|
||||
|
||||
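# For orientation: with apienabled set, a client reaches this server over
# XML-RPC with HTTP Basic auth. A minimal sketch using only the standard
# library (host, port and credentials are illustrative placeholders, not
# values taken from this file):
#
#     import xmlrpc.client
#     api = xmlrpc.client.ServerProxy(
#         'http://apiusername:apipassword@127.0.0.1:8442/')
#     print(api.listAddresses())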
class APIError(Exception):
    """APIError exception class"""

    def __init__(self, error_number, error_message):
        super(APIError, self).__init__()
        self.error_number = error_number
        self.error_message = error_message

    def __str__(self):
        return "API Error %04i: %s" % (self.error_number, self.error_message)
class StoppableXMLRPCServer(SimpleXMLRPCServer):
    """A SimpleXMLRPCServer that honours state.shutdown"""
    # pylint: disable=too-few-public-methods
    allow_reuse_address = True

    def serve_forever(self):
        """Start the SimpleXMLRPCServer"""
        # pylint: disable=arguments-differ
        while state.shutdown == 0:
            self.handle_request()
# This thread, of which there is only one, runs the API.
class singleAPI(StoppableThread):
    """API thread"""

    name = "singleAPI"

    def stopThread(self):
        super(singleAPI, self).stopThread()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((
                BMConfigParser().get('bitmessagesettings', 'apiinterface'),
                BMConfigParser().getint('bitmessagesettings', 'apiport')
            ))
            s.shutdown(socket.SHUT_RDWR)
            s.close()
        except BaseException:
            pass
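    # The throwaway connection in stopThread() exists to unblock the
    # handle_request() call inside serve_forever(), so that loop gets a
    # chance to re-check state.shutdown; no payload is ever sent.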
    def run(self):
        port = BMConfigParser().getint('bitmessagesettings', 'apiport')
        try:
            getattr(errno, 'WSAEADDRINUSE')
        except AttributeError:
            errno.WSAEADDRINUSE = errno.EADDRINUSE
        for attempt in range(50):
            try:
                if attempt > 0:
                    logger.warning(
                        'Failed to start API listener on port %s', port)
                    port = random.randint(32767, 65535)
                se = StoppableXMLRPCServer(
                    (BMConfigParser().get(
                        'bitmessagesettings', 'apiinterface'),
                     port),
                    MySimpleXMLRPCRequestHandler, True, True)
            except socket.error as e:
                if e.errno in (errno.EADDRINUSE, errno.WSAEADDRINUSE):
                    continue
            else:
                if attempt > 0:
                    logger.warning('Setting apiport to %s', port)
                    BMConfigParser().set(
                        'bitmessagesettings', 'apiport', str(port))
                    BMConfigParser().save()
                break
        # se.register_introspection_functions()

        # apiNotifyPath = BMConfigParser().safeGet(
        #     'bitmessagesettings', 'apinotifypath')

        # if apiNotifyPath:
        #     logger.info('Trying to call %s', apiNotifyPath)
        #     try:
        #         subprocess.call([apiNotifyPath, "startingUp"])
        #     except OSError:
        #         logger.warning(
        #             'Failed to call %s, removing apinotifypath setting',
        #             apiNotifyPath)
        #         BMConfigParser().remove_option(
        #             'bitmessagesettings', 'apinotifypath')

        # se.serve_forever()
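    # If the configured apiport is already bound, run() retries with random
    # ports in [32767, 65535] (up to 50 attempts in total) and persists the
    # first one that binds back into keys.dat via BMConfigParser().save().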
class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """
    This is one of several classes that constitute the API.

    This class was written by Vaibhav Bhatia.
    Modified by Jonathan Warren (Atheros).
    http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
    """
    # pylint: disable=too-many-public-methods

    def do_POST(self):
        """
        Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.

        Note: this method is the same as in SimpleXMLRPCRequestHandler,
        just hacked to handle cookies
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10 * 1024 * 1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = b''.join(L)  # rfile yields bytes under Python 3

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            # pylint: disable=protected-access
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None)
            )
        # This should only happen if the module is buggy
        except BaseException:
            # internal error, report as HTTP server error
            self.send_response(500)
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))

            # HACK :start -> sends cookies here
            if self.cookies:
                for cookie in self.cookies:
                    self.send_header('Set-Cookie', cookie.output(header=''))
            # HACK :end

            self.end_headers()
            self.wfile.write(response)

            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)

            # actually handle shutdown command after sending response
            if state.shutdown is False:
                shutdown.doCleanShutdown()
    def APIAuthenticateClient(self):
        """Predicate to check for valid API credentials in the request header"""

        if 'Authorization' in self.headers:
            # handle Basic authentication
            _, encstr = self.headers.get('Authorization').split()
            emailid, password = base64.b64decode(
                encstr).decode('utf-8').split(':')
            return (
                emailid == BMConfigParser().get(
                    'bitmessagesettings', 'apiusername') and
                password == BMConfigParser().get(
                    'bitmessagesettings', 'apipassword')
            )
        else:
            logger.warning(
                'Authentication failed because header lacks'
                ' Authentication field')
            time.sleep(2)

        return False
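    # The credential check above expects the standard HTTP Basic form,
    # i.e. base64("user:password"). Building such a header client-side
    # (values illustrative):
    #
    #     import base64
    #     token = base64.b64encode(b'apiusername:apipassword').decode('ascii')
    #     headers = {'Authorization': 'Basic ' + token}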
    def _decode(self, text, decode_type):
        try:
            if decode_type == 'hex':
                return unhexlify(text)
            elif decode_type == 'base64':
                return base64.b64decode(text)
        except Exception as e:
            raise APIError(
                22, "Decode error - %s. Had trouble while decoding string: %r"
                % (e, text)
            )
        return None
    def _verifyAddress(self, address):
        status, addressVersionNumber, streamNumber, ripe = \
            decodeAddress(address)
        if status != 'success':
            logger.warning(
                'API Error 0007: Could not decode address %s. Status: %s.',
                address, status
            )

            if status == 'checksumfailed':
                raise APIError(8, 'Checksum failed for address: ' + address)
            if status == 'invalidcharacters':
                raise APIError(9, 'Invalid characters in address: ' + address)
            if status == 'versiontoohigh':
                raise APIError(
                    10,
                    'Address version number too high (or zero) in address: ' +
                    address)
            if status == 'varintmalformed':
                raise APIError(26, 'Malformed varint in address: ' + address)
            raise APIError(
                7, 'Could not decode address: %s : %s' % (address, status))
        if addressVersionNumber < 2 or addressVersionNumber > 4:
            raise APIError(
                11, 'The address version number currently must be 2, 3 or 4.'
                ' Others aren\'t supported. Check the address.'
            )
        if streamNumber != 1:
            raise APIError(
                12, 'The stream number must be 1. Others aren\'t supported.'
                ' Check the address.'
            )

        return (status, addressVersionNumber, streamNumber, ripe)
    # Request Handlers

    def HandleListAddresses(self, method):
        """Handle a request to list addresses"""
        data = '{"addresses":['
        for addressInKeysFile in BMConfigParser().addresses():
            status, addressVersionNumber, streamNumber, hash01 = decodeAddress(
                addressInKeysFile)
            if len(data) > 20:
                data += ','
            if BMConfigParser().has_option(addressInKeysFile, 'chan'):
                chan = BMConfigParser().getboolean(addressInKeysFile, 'chan')
            else:
                chan = False
            label = BMConfigParser().get(addressInKeysFile, 'label')
            if method == 'listAddresses2':
                label = base64.b64encode(label)
            data += json.dumps({
                'label': label,
                'address': addressInKeysFile,
                'stream': streamNumber,
                'enabled':
                    BMConfigParser().getboolean(addressInKeysFile, 'enabled'),
                'chan': chan
            }, indent=4, separators=(',', ': '))
        data += ']}'
        return data
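    # A note on the recurring `len(data) > 20` / `len(data) > 25` tests in
    # these handlers: the bare JSON prefix (e.g. '{"addresses":[') is shorter
    # than the threshold, so the test only becomes true once a first object
    # has been appended, and a comma is then written before every further one.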
    def HandleListAddressBookEntries(self, params):
        """Handle a request to list address book entries"""

        if len(params) == 1:
            label, = params
            label = self._decode(label, "base64")
            queryreturn = sqlQuery(
                "SELECT label, address from addressbook WHERE label = ?",
                label)
        elif len(params) > 1:
            raise APIError(0, "Too many parameters, max 1")
        else:
            queryreturn = sqlQuery("SELECT label, address from addressbook")
        data = '{"addresses":['
        for row in queryreturn:
            label, address = row
            label = shared.fixPotentiallyInvalidUTF8Data(label)
            if len(data) > 20:
                data += ','
            data += json.dumps({
                'label': base64.b64encode(label),
                'address': address}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleAddAddressBookEntry(self, params):
        """Handle a request to add an address book entry"""

        if len(params) != 2:
            raise APIError(0, "I need label and address")
        address, label = params
        label = self._decode(label, "base64")
        address = addBMIfNotPresent(address)
        self._verifyAddress(address)
        queryreturn = sqlQuery(
            "SELECT address FROM addressbook WHERE address=?", address)
        if queryreturn != []:
            raise APIError(
                16, 'You already have this address in your address book.')

        sqlExecute("INSERT INTO addressbook VALUES(?,?)", label, address)
        queues.UISignalQueue.put(('rerenderMessagelistFromLabels', ''))
        queues.UISignalQueue.put(('rerenderMessagelistToLabels', ''))
        queues.UISignalQueue.put(('rerenderAddressBook', ''))
        return "Added address %s to address book" % address

    def HandleDeleteAddressBookEntry(self, params):
        """Handle a request to delete an address book entry"""

        if len(params) != 1:
            raise APIError(0, "I need an address")
        address, = params
        address = addBMIfNotPresent(address)
        self._verifyAddress(address)
        sqlExecute('DELETE FROM addressbook WHERE address=?', address)
        queues.UISignalQueue.put(('rerenderMessagelistFromLabels', ''))
        queues.UISignalQueue.put(('rerenderMessagelistToLabels', ''))
        queues.UISignalQueue.put(('rerenderAddressBook', ''))
        return "Deleted address book entry for %s if it existed" % address
    def HandleCreateRandomAddress(self, params):
        """Handle a request to create a random address"""

        if not params:
            raise APIError(0, 'I need parameters!')
        elif len(params) == 1:
            label, = params
            eighteenByteRipe = False
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 2:
            label, eighteenByteRipe = params
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 3:
            label, eighteenByteRipe, totalDifficulty = params
            nonceTrialsPerByte = int(
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte *
                totalDifficulty)
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 4:
            label, eighteenByteRipe, totalDifficulty, \
                smallMessageDifficulty = params
            nonceTrialsPerByte = int(
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte *
                totalDifficulty)
            payloadLengthExtraBytes = int(
                defaults.networkDefaultPayloadLengthExtraBytes *
                smallMessageDifficulty)
        else:
            raise APIError(0, 'Too many parameters!')
        label = self._decode(label, "base64")
        try:
            unicode(label, 'utf-8')
        except BaseException:
            raise APIError(17, 'Label is not valid UTF-8 data.')
        queues.apiAddressGeneratorReturnQueue.queue.clear()
        streamNumberForAddress = 1
        queues.addressGeneratorQueue.put((
            'createRandomAddress', 4, streamNumberForAddress, label, 1, "",
            eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes
        ))
        return queues.apiAddressGeneratorReturnQueue.get()
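    # The optional difficulty arguments above scale the network defaults
    # from defaults.py multiplicatively; e.g. with the stock
    # networkDefaultProofOfWorkNonceTrialsPerByte of 1000, a totalDifficulty
    # of 2 yields nonceTrialsPerByte = int(1000 * 2) = 2000.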
    def HandleCreateDeterministicAddresses(self, params):
        """Handle a request to create a deterministic address"""
        # pylint: disable=too-many-branches, too-many-statements

        if not params:
            raise APIError(0, 'I need parameters!')
        elif len(params) == 1:
            passphrase, = params
            numberOfAddresses = 1
            addressVersionNumber = 0
            streamNumber = 0
            eighteenByteRipe = False
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 2:
            passphrase, numberOfAddresses = params
            addressVersionNumber = 0
            streamNumber = 0
            eighteenByteRipe = False
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 3:
            passphrase, numberOfAddresses, addressVersionNumber = params
            streamNumber = 0
            eighteenByteRipe = False
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 4:
            passphrase, numberOfAddresses, addressVersionNumber, \
                streamNumber = params
            eighteenByteRipe = False
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 5:
            passphrase, numberOfAddresses, addressVersionNumber, \
                streamNumber, eighteenByteRipe = params
            nonceTrialsPerByte = BMConfigParser().get(
                'bitmessagesettings', 'defaultnoncetrialsperbyte')
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 6:
            passphrase, numberOfAddresses, addressVersionNumber, \
                streamNumber, eighteenByteRipe, totalDifficulty = params
            nonceTrialsPerByte = int(
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte *
                totalDifficulty)
            payloadLengthExtraBytes = BMConfigParser().get(
                'bitmessagesettings', 'defaultpayloadlengthextrabytes')
        elif len(params) == 7:
            passphrase, numberOfAddresses, addressVersionNumber, \
                streamNumber, eighteenByteRipe, totalDifficulty, \
                smallMessageDifficulty = params
            nonceTrialsPerByte = int(
                defaults.networkDefaultProofOfWorkNonceTrialsPerByte *
                totalDifficulty)
            payloadLengthExtraBytes = int(
                defaults.networkDefaultPayloadLengthExtraBytes *
                smallMessageDifficulty)
        else:
            raise APIError(0, 'Too many parameters!')
        if not passphrase:
            raise APIError(1, 'The specified passphrase is blank.')
        if not isinstance(eighteenByteRipe, bool):
            raise APIError(
                23, 'Bool expected in eighteenByteRipe, saw %s instead' %
                type(eighteenByteRipe))
        passphrase = self._decode(passphrase, "base64")
        # 0 means "just use the proper addressVersionNumber"
        if addressVersionNumber == 0:
            addressVersionNumber = 4
        # if addressVersionNumber != 3 and addressVersionNumber != 4:
        if addressVersionNumber not in (3, 4):
            raise APIError(
                2, 'The address version number currently must be 3, 4, or 0'
                ' (which means auto-select). %i isn\'t supported.' %
                addressVersionNumber)
        if streamNumber == 0:  # 0 means "just use the most available stream"
            streamNumber = 1
        if streamNumber != 1:
            raise APIError(
                3, 'The stream number must be 1 (or 0 which means'
                ' auto-select). Others aren\'t supported.')
        if numberOfAddresses == 0:
            raise APIError(
                4, 'Why would you ask me to generate 0 addresses for you?')
        if numberOfAddresses > 999:
            raise APIError(
                5, 'You have (accidentally?) specified too many addresses to'
                ' make. Maximum 999. This check only exists to prevent'
                ' mischief; if you really want to create more addresses than'
                ' this, contact the Bitmessage developers and we can modify'
                ' the check or you can do it yourself by searching the source'
                ' code for this message.')
        queues.apiAddressGeneratorReturnQueue.queue.clear()
        logger.debug(
            'Requesting that the addressGenerator create %s addresses.',
            numberOfAddresses)
        queues.addressGeneratorQueue.put((
            'createDeterministicAddresses', addressVersionNumber, streamNumber,
            'unused API address', numberOfAddresses, passphrase,
            eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes
        ))
        data = '{"addresses":['
        queueReturn = queues.apiAddressGeneratorReturnQueue.get()
        for item in queueReturn:
            if len(data) > 20:
                data += ','
            data += "\"" + item + "\""
        data += ']}'
        return data
    def HandleGetDeterministicAddress(self, params):
        """Handle a request to get a deterministic address"""

        if len(params) != 3:
            raise APIError(0, 'I need exactly 3 parameters.')
        passphrase, addressVersionNumber, streamNumber = params
        numberOfAddresses = 1
        eighteenByteRipe = False
        if not passphrase:
            raise APIError(1, 'The specified passphrase is blank.')
        passphrase = self._decode(passphrase, "base64")
        # if addressVersionNumber != 3 and addressVersionNumber != 4:
        if addressVersionNumber not in (3, 4):
            raise APIError(
                2, 'The address version number currently must be 3 or 4. %i'
                ' isn\'t supported.' % addressVersionNumber)
        if streamNumber != 1:
            raise APIError(
                3, 'The stream number must be 1. Others aren\'t supported.')
        queues.apiAddressGeneratorReturnQueue.queue.clear()
        logger.debug(
            'Requesting that the addressGenerator create %s addresses.',
            numberOfAddresses)
        queues.addressGeneratorQueue.put((
            'getDeterministicAddress', addressVersionNumber, streamNumber,
            'unused API address', numberOfAddresses, passphrase,
            eighteenByteRipe
        ))
        return queues.apiAddressGeneratorReturnQueue.get()
    def HandleCreateChan(self, params):
        """Handle a request to create a chan"""

        if not params:
            raise APIError(0, 'I need parameters.')
        elif len(params) == 1:
            passphrase, = params
        passphrase = self._decode(passphrase, "base64")

        if not passphrase:
            raise APIError(1, 'The specified passphrase is blank.')
        # It would be nice to make the label the passphrase but it is
        # possible that the passphrase contains non-utf-8 characters.
        try:
            unicode(passphrase, 'utf-8')
            label = str_chan + ' ' + passphrase
        except BaseException:
            label = str_chan + ' ' + repr(passphrase)

        addressVersionNumber = 4
        streamNumber = 1
        queues.apiAddressGeneratorReturnQueue.queue.clear()
        logger.debug(
            'Requesting that the addressGenerator create chan %s.', passphrase)
        queues.addressGeneratorQueue.put((
            'createChan', addressVersionNumber, streamNumber, label,
            passphrase, True
        ))
        queueReturn = queues.apiAddressGeneratorReturnQueue.get()
        if not queueReturn:
            raise APIError(24, 'Chan address is already present.')
        address = queueReturn[0]
        return address
    def HandleJoinChan(self, params):
        """Handle a request to join a chan"""

        if len(params) < 2:
            raise APIError(0, 'I need two parameters.')
        elif len(params) == 2:
            passphrase, suppliedAddress = params
        passphrase = self._decode(passphrase, "base64")
        if not passphrase:
            raise APIError(1, 'The specified passphrase is blank.')
        # It would be nice to make the label the passphrase but it is
        # possible that the passphrase contains non-utf-8 characters.
        try:
            unicode(passphrase, 'utf-8')
            label = str_chan + ' ' + passphrase
        except BaseException:
            label = str_chan + ' ' + repr(passphrase)
        status, addressVersionNumber, streamNumber, toRipe = (
            self._verifyAddress(suppliedAddress))
        suppliedAddress = addBMIfNotPresent(suppliedAddress)
        queues.apiAddressGeneratorReturnQueue.queue.clear()
        queues.addressGeneratorQueue.put((
            'joinChan', suppliedAddress, label, passphrase, True
        ))
        addressGeneratorReturnValue = \
            queues.apiAddressGeneratorReturnQueue.get()

        # check the empty (already present) case first so the comparison
        # below cannot raise IndexError
        if not addressGeneratorReturnValue:
            raise APIError(24, 'Chan address is already present.')
        if addressGeneratorReturnValue[0] == \
                'chan name does not match address':
            raise APIError(18, 'Chan name does not match address.')
        return "success"
    def HandleLeaveChan(self, params):
        """Handle a request to leave a chan"""

        if not params:
            raise APIError(0, 'I need parameters.')
        elif len(params) == 1:
            address, = params
        status, addressVersionNumber, streamNumber, toRipe = (
            self._verifyAddress(address))
        address = addBMIfNotPresent(address)
        if not BMConfigParser().has_section(address):
            raise APIError(
                13, 'Could not find this address in your keys.dat file.')
        if not BMConfigParser().safeGetBoolean(address, 'chan'):
            raise APIError(
                25, 'Specified address is not a chan address.'
                ' Use deleteAddress API call instead.')
        BMConfigParser().remove_section(address)
        with open(state.appdata + 'keys.dat', 'wb') as configfile:
            BMConfigParser().write(configfile)
        return 'success'
    def HandleDeleteAddress(self, params):
        """Handle a request to delete an address"""

        if not params:
            raise APIError(0, 'I need parameters.')
        elif len(params) == 1:
            address, = params
        status, addressVersionNumber, streamNumber, toRipe = (
            self._verifyAddress(address))
        address = addBMIfNotPresent(address)
        if not BMConfigParser().has_section(address):
            raise APIError(
                13, 'Could not find this address in your keys.dat file.')
        BMConfigParser().remove_section(address)
        with open(state.appdata + 'keys.dat', 'wb') as configfile:
            BMConfigParser().write(configfile)
        queues.UISignalQueue.put(('writeNewAddressToTable', ('', '', '')))
        shared.reloadMyAddressHashes()
        return 'success'
    def HandleGetAllInboxMessages(self, params):
        """Handle a request to get all inbox messages"""

        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, received, message,"
            " encodingtype, read FROM inbox where folder='inbox'"
            " ORDER BY received"
        )
        data = '{"inboxMessages":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, received, message, \
                encodingtype, read = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            if len(data) > 25:
                data += ','
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'receivedTime': received,
                'read': read}, indent=4, separators=(',', ': '))
        data += ']}'
        return data

    def HandleGetAllInboxMessageIds(self, params):
        """Handle a request to get all inbox message IDs"""

        queryreturn = sqlQuery(
            "SELECT msgid FROM inbox where folder='inbox' ORDER BY received")
        data = '{"inboxMessageIds":['
        for row in queryreturn:
            msgid = row[0]
            if len(data) > 25:
                data += ','
            data += json.dumps(
                {'msgid': hexlify(msgid)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
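    # A consumer of the two handlers above must undo the transport encodings:
    # msgid comes back hex-encoded, subject and message base64-encoded.
    # Sketch, assuming `data` is a JSON string returned by the first handler:
    #
    #     import base64, json
    #     for msg in json.loads(data)['inboxMessages']:
    #         subject = base64.b64decode(msg['subject'])
    #         body = base64.b64decode(msg['message'])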
    def HandleGetInboxMessageById(self, params):
        """Handle a request to get an inbox message by ID"""

        if not params:
            raise APIError(0, 'I need parameters!')
        elif len(params) == 1:
            msgid = self._decode(params[0], "hex")
        elif len(params) >= 2:
            msgid = self._decode(params[0], "hex")
            readStatus = params[1]
            if not isinstance(readStatus, bool):
                raise APIError(
                    23, 'Bool expected in readStatus, saw %s instead.' %
                    type(readStatus))
            queryreturn = sqlQuery(
                "SELECT read FROM inbox WHERE msgid=?", msgid)
            # UPDATE is slow, only update if status is different
            if queryreturn != [] and (queryreturn[0][0] == 1) != readStatus:
                sqlExecute(
                    "UPDATE inbox set read = ? WHERE msgid=?",
                    readStatus, msgid)
                queues.UISignalQueue.put(('changedInboxUnread', None))
        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, received, message,"
            " encodingtype, read FROM inbox WHERE msgid=?", msgid
        )
        data = '{"inboxMessage":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, received, message, \
                encodingtype, read = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'receivedTime': received,
                'read': read}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleGetAllSentMessages(self, params):
        """Handle a request to get all sent messages"""

        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
            " message, encodingtype, status, ackdata FROM sent"
            " WHERE folder='sent' ORDER BY lastactiontime"
        )
        data = '{"sentMessages":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, \
                encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            if len(data) > 25:
                data += ','
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'lastActionTime': lastactiontime,
                'status': status,
                'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data

    def HandleGetAllSentMessageIds(self, params):
        """Handle a request to get all sent message IDs"""

        queryreturn = sqlQuery(
            "SELECT msgid FROM sent where folder='sent'"
            " ORDER BY lastactiontime"
        )
        data = '{"sentMessageIds":['
        for row in queryreturn:
            msgid = row[0]
            if len(data) > 25:
                data += ','
            data += json.dumps(
                {'msgid': hexlify(msgid)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleInboxMessagesByReceiver(self, params):
        """Handle a request to get inbox messages by receiver"""

        if not params:
            raise APIError(0, 'I need parameters!')
        toAddress = params[0]
        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, received, message,"
            " encodingtype FROM inbox WHERE folder='inbox' AND toAddress=?",
            toAddress)
        data = '{"inboxMessages":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, received, message, \
                encodingtype = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            if len(data) > 25:
                data += ','
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'receivedTime': received}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleGetSentMessageById(self, params):
        """Handle a request to get a sent message by ID"""

        if not params:
            raise APIError(0, 'I need parameters!')
        msgid = self._decode(params[0], "hex")
        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
            " message, encodingtype, status, ackdata FROM sent WHERE msgid=?",
            msgid
        )
        data = '{"sentMessage":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, \
                encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'lastActionTime': lastactiontime,
                'status': status,
                'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleGetSentMessagesByAddress(self, params):
        """Handle a request to get sent messages by address"""

        if not params:
            raise APIError(0, 'I need parameters!')
        fromAddress = params[0]
        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
            " message, encodingtype, status, ackdata FROM sent"
            " WHERE folder='sent' AND fromAddress=? ORDER BY lastactiontime",
            fromAddress
        )
        data = '{"sentMessages":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, \
                encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            if len(data) > 25:
                data += ','
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'lastActionTime': lastactiontime,
                'status': status,
                'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleGetSentMessagesByAckData(self, params):
        """Handle a request to get sent messages by ack data"""

        if not params:
            raise APIError(0, 'I need parameters!')
        ackData = self._decode(params[0], "hex")
        queryreturn = sqlQuery(
            "SELECT msgid, toaddress, fromaddress, subject, lastactiontime,"
            " message, encodingtype, status, ackdata FROM sent"
            " WHERE ackdata=?", ackData
        )
        data = '{"sentMessage":['
        for row in queryreturn:
            msgid, toAddress, fromAddress, subject, lastactiontime, message, \
                encodingtype, status, ackdata = row
            subject = shared.fixPotentiallyInvalidUTF8Data(subject)
            message = shared.fixPotentiallyInvalidUTF8Data(message)
            data += json.dumps({
                'msgid': hexlify(msgid),
                'toAddress': toAddress,
                'fromAddress': fromAddress,
                'subject': base64.b64encode(subject),
                'message': base64.b64encode(message),
                'encodingType': encodingtype,
                'lastActionTime': lastactiontime,
                'status': status,
                'ackData': hexlify(ackdata)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data
    def HandleTrashMessage(self, params):
        """Handle a request to trash a message by ID"""

        if not params:
            raise APIError(0, 'I need parameters!')
        msgid = self._decode(params[0], "hex")

        # Trash if in inbox table
        helper_inbox.trash(msgid)
        # Trash if in sent table
        sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid)
        return 'Trashed message (assuming message existed).'

    def HandleTrashInboxMessage(self, params):
        """Handle a request to trash an inbox message by ID"""

        if not params:
            raise APIError(0, 'I need parameters!')
        msgid = self._decode(params[0], "hex")
        helper_inbox.trash(msgid)
        return 'Trashed inbox message (assuming message existed).'

    def HandleTrashSentMessage(self, params):
        """Handle a request to trash a sent message by ID"""

        if not params:
            raise APIError(0, 'I need parameters!')
        msgid = self._decode(params[0], "hex")
        sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid)
        return 'Trashed sent message (assuming message existed).'
    def HandleSendMessage(self, params):  # pylint: disable=too-many-locals
        """Handle a request to send a message"""

        if not params:
            raise APIError(0, 'I need parameters!')
        elif len(params) == 4:
            toAddress, fromAddress, subject, message = params
            encodingType = 2
            TTL = 4 * 24 * 60 * 60
        elif len(params) == 5:
            toAddress, fromAddress, subject, message, encodingType = params
            TTL = 4 * 24 * 60 * 60
        elif len(params) == 6:
            toAddress, fromAddress, subject, message, encodingType, TTL = \
                params

        if encodingType not in [2, 3]:
            raise APIError(6, 'The encoding type must be 2 or 3.')
        subject = self._decode(subject, "base64")
        message = self._decode(message, "base64")
        if len(subject + message) > (2 ** 18 - 500):
            raise APIError(27, 'Message is too long.')
        if TTL < 60 * 60:
            TTL = 60 * 60
        if TTL > 28 * 24 * 60 * 60:
            TTL = 28 * 24 * 60 * 60
        toAddress = addBMIfNotPresent(toAddress)
        fromAddress = addBMIfNotPresent(fromAddress)
        status, addressVersionNumber, streamNumber, toRipe = \
            self._verifyAddress(toAddress)
        self._verifyAddress(fromAddress)
        try:
            fromAddressEnabled = BMConfigParser().getboolean(
                fromAddress, 'enabled')
        except BaseException:
            raise APIError(
                13, 'Could not find your fromAddress in the keys.dat file.')
        if not fromAddressEnabled:
            raise APIError(14, 'Your fromAddress is disabled. Cannot send.')

        stealthLevel = BMConfigParser().safeGetInt(
            'bitmessagesettings', 'ackstealthlevel')
        ackdata = genAckPayload(streamNumber, stealthLevel)

        t = ('',
             toAddress,
             toRipe,
             fromAddress,
             subject,
             message,
             ackdata,
             int(time.time()),  # sentTime (this won't change)
             int(time.time()),  # lastActionTime
             0,
             'msgqueued',
             0,
             'sent',
             2,
             TTL)
        helper_sent.insert(t)

        toLabel = ''
        queryreturn = sqlQuery(
            "SELECT label FROM addressbook WHERE address=?", toAddress)
        if queryreturn != []:
            for row in queryreturn:
                toLabel, = row
        queues.UISignalQueue.put(('displayNewSentMessage', (
            toAddress, toLabel, fromAddress, subject, message, ackdata)))

        queues.workerQueue.put(('sendmessage', toAddress))

        return hexlify(ackdata)
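    # Behaviour of the TTL handling above (and in the broadcast handler
    # below), stated standalone: requested TTLs are clamped to the range
    # [1 hour, 28 days], and an omitted TTL defaults to four days.
    #
    #     def clamp_ttl(ttl, low=60 * 60, high=28 * 24 * 60 * 60):
    #         return max(low, min(ttl, high))
    #
    #     clamp_ttl(60)      # -> 3600 (one hour)
    #     clamp_ttl(345600)  # -> 345600 (the four-day default)
    #     clamp_ttl(10**9)   # -> 2419200 (28 days)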
    def HandleSendBroadcast(self, params):
        """Handle a request to send a broadcast message"""

        if not params:
            raise APIError(0, 'I need parameters!')
        if len(params) == 3:
            fromAddress, subject, message = params
            encodingType = 2
            TTL = 4 * 24 * 60 * 60
        elif len(params) == 4:
            fromAddress, subject, message, encodingType = params
            TTL = 4 * 24 * 60 * 60
        elif len(params) == 5:
            fromAddress, subject, message, encodingType, TTL = params

        if encodingType not in [2, 3]:
            raise APIError(6, 'The encoding type must be 2 or 3.')

        subject = self._decode(subject, "base64")
        message = self._decode(message, "base64")
        if len(subject + message) > (2 ** 18 - 500):
            raise APIError(27, 'Message is too long.')
        if TTL < 60 * 60:
            TTL = 60 * 60
        if TTL > 28 * 24 * 60 * 60:
            TTL = 28 * 24 * 60 * 60
        fromAddress = addBMIfNotPresent(fromAddress)
        self._verifyAddress(fromAddress)
        try:
            BMConfigParser().getboolean(fromAddress, 'enabled')
        except BaseException:
            raise APIError(
                13, 'could not find your fromAddress in the keys.dat file.')
        streamNumber = decodeAddress(fromAddress)[2]
        ackdata = genAckPayload(streamNumber, 0)
        toAddress = '[Broadcast subscribers]'
        ripe = ''

        t = ('',
             toAddress,
             ripe,
             fromAddress,
             subject,
             message,
             ackdata,
             int(time.time()),  # sentTime (this doesn't change)
             int(time.time()),  # lastActionTime
             0,
             'broadcastqueued',
             0,
             'sent',
             2,
             TTL)
        helper_sent.insert(t)

        toLabel = '[Broadcast subscribers]'
        queues.UISignalQueue.put(('displayNewSentMessage', (
            toAddress, toLabel, fromAddress, subject, message, ackdata)))
        queues.workerQueue.put(('sendbroadcast', ''))

        return hexlify(ackdata)
    def HandleGetStatus(self, params):
        """Handle a request to get the status of a sent message"""

        if len(params) != 1:
            raise APIError(0, 'I need one parameter!')
        ackdata, = params
        if len(ackdata) < 76:
            # The length of ackData should be at least 38 bytes (76 hex digits)
            raise APIError(15, 'Invalid ackData object size.')
        ackdata = self._decode(ackdata, "hex")
        queryreturn = sqlQuery(
            "SELECT status FROM sent where ackdata=?", ackdata)
        if queryreturn == []:
            return 'notfound'
        for row in queryreturn:
            status, = row
            return status
    def HandleAddSubscription(self, params):
        """Handle a request to add a subscription"""

        if not params:
            raise APIError(0, 'I need parameters!')
        if len(params) == 1:
            address, = params
            label = ''
        if len(params) == 2:
            address, label = params
            label = self._decode(label, "base64")
            try:
                unicode(label, 'utf-8')
            except BaseException:
                raise APIError(17, 'Label is not valid UTF-8 data.')
        if len(params) > 2:
            raise APIError(0, 'I need either 1 or 2 parameters!')
        address = addBMIfNotPresent(address)
        self._verifyAddress(address)
        # First we must check to see if the address is already in the
        # subscriptions list.
        queryreturn = sqlQuery(
            "SELECT * FROM subscriptions WHERE address=?", address)
        if queryreturn != []:
            raise APIError(16, 'You are already subscribed to that address.')
        sqlExecute(
            "INSERT INTO subscriptions VALUES (?,?,?)", label, address, True)
        shared.reloadBroadcastSendersForWhichImWatching()
        queues.UISignalQueue.put(('rerenderMessagelistFromLabels', ''))
        queues.UISignalQueue.put(('rerenderSubscriptions', ''))
        return 'Added subscription.'

    def HandleDeleteSubscription(self, params):
        """Handle a request to delete a subscription"""

        if len(params) != 1:
            raise APIError(0, 'I need 1 parameter!')
        address, = params
        address = addBMIfNotPresent(address)
        sqlExecute('''DELETE FROM subscriptions WHERE address=?''', address)
        shared.reloadBroadcastSendersForWhichImWatching()
        queues.UISignalQueue.put(('rerenderMessagelistFromLabels', ''))
        queues.UISignalQueue.put(('rerenderSubscriptions', ''))
        return 'Deleted subscription if it existed.'
    def ListSubscriptions(self, params):
        """Handle a request to list subscriptions"""
        queryreturn = sqlQuery(
            "SELECT label, address, enabled FROM subscriptions")
        data = {'subscriptions': []}
        for row in queryreturn:
            label, address, enabled = row
            label = shared.fixPotentiallyInvalidUTF8Data(label)
            data['subscriptions'].append({
                'label': base64.b64encode(label),
                'address': address,
                'enabled': enabled == 1
            })
        return json.dumps(data, indent=4, separators=(',', ': '))

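Illustrative shape of the JSON this returns (values invented for the example; labels come back base64-encoded, and enabled is coerced to a boolean by the enabled == 1 comparison):

    {
        "subscriptions": [
            {
                "label": "dGVzdA==",
                "address": "BM-2cW... (placeholder)",
                "enabled": true
            }
        ]
    }
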
    def HandleDisseminatePreEncryptedMsg(self, params):
        """Handle a request to disseminate an encrypted message"""
        # The device issuing this command to PyBitmessage supplies a msg
        # object that has already been encrypted but which still needs the POW
        # to be done. PyBitmessage accepts this msg object and sends it out
        # to the rest of the Bitmessage network as if it had generated
        # the message itself. Please do not yet add this to the api doc.
        if len(params) != 3:
            raise APIError(0, 'I need 3 parameters!')
        encryptedPayload, requiredAverageProofOfWorkNonceTrialsPerByte, \
            requiredPayloadLengthExtraBytes = params
        encryptedPayload = self._decode(encryptedPayload, "hex")
        # Let us do the POW and attach it to the front
        target = 2**64 / (
            (
                len(encryptedPayload) + requiredPayloadLengthExtraBytes + 8
            ) * requiredAverageProofOfWorkNonceTrialsPerByte
        )
        with threads.printLock:
            print(
                '(For msg message via API) Doing proof of work.'
                ' Total required difficulty:',
                float(
                    requiredAverageProofOfWorkNonceTrialsPerByte
                ) / defaults.networkDefaultProofOfWorkNonceTrialsPerByte,
                'Required small message difficulty:',
                float(
                    requiredPayloadLengthExtraBytes
                ) / defaults.networkDefaultPayloadLengthExtraBytes,
            )
        powStartTime = time.time()
        initialHash = hashlib.sha512(encryptedPayload).digest()
        trialValue, nonce = proofofwork.run(target, initialHash)
        with threads.printLock:
            print('(For msg message via API) Found proof of work', trialValue, 'Nonce:', nonce)
            try:
                print(
                    'POW took', int(time.time() - powStartTime),
                    'seconds.', nonce / (time.time() - powStartTime),
                    'nonce trials per second.',
                )
            except BaseException:
                pass
        encryptedPayload = pack('>Q', nonce) + encryptedPayload
        toStreamNumber = decodeVarint(encryptedPayload[16:26])[0]
        inventoryHash = calculateInventoryHash(encryptedPayload)
        objectType = 2
        TTL = 2.5 * 24 * 60 * 60
        Inventory()[inventoryHash] = (
            objectType, toStreamNumber, encryptedPayload,
            int(time.time()) + TTL, ''
        )
        with threads.printLock:
            print('Broadcasting inv for msg(API disseminatePreEncryptedMsg command):', hexlify(inventoryHash))
        queues.invQueue.put((toStreamNumber, inventoryHash))

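The target computed above is the standard Bitmessage difficulty formula: larger payloads and higher per-byte trial requirements shrink the target, so proofofwork.run has to grind through more nonces before the trial value falls below it. A sketch of the same computation as a pure function (integer division used here for exactness):

    def pow_target(payload_len, extra_bytes, trials_per_byte):
        # 8 accounts for the nonce that will be prepended to the payload.
        return 2 ** 64 // ((payload_len + extra_bytes + 8) * trials_per_byte)
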
    def HandleTrashSentMessageByAckData(self, params):
        """Handle a request to trash a sent message by ackdata"""
        # This API method should only be used when msgid is not available
        if not params:
            raise APIError(0, 'I need parameters!')
        ackdata = self._decode(params[0], "hex")
        sqlExecute("UPDATE sent SET folder='trash' WHERE ackdata=?", ackdata)
        return 'Trashed sent message (assuming message existed).'

    def HandleDisseminatePubKey(self, params):
        """Handle a request to disseminate a public key"""
        # The device issuing this command to PyBitmessage supplies a pubkey
        # object to be disseminated to the rest of the Bitmessage network.
        # PyBitmessage accepts this pubkey object and sends it out to the rest
        # of the Bitmessage network as if it had generated the pubkey object
        # itself. Please do not yet add this to the api doc.
        if len(params) != 1:
            raise APIError(0, 'I need 1 parameter!')
        payload, = params
        payload = self._decode(payload, "hex")

        # Let us do the POW
        target = 2 ** 64 / ((
            len(payload) + defaults.networkDefaultPayloadLengthExtraBytes + 8
        ) * defaults.networkDefaultProofOfWorkNonceTrialsPerByte)
        print('(For pubkey message via API) Doing proof of work...')
        initialHash = hashlib.sha512(payload).digest()
        trialValue, nonce = proofofwork.run(target, initialHash)
        print('(For pubkey message via API) Found proof of work', trialValue, 'Nonce:', nonce)
        payload = pack('>Q', nonce) + payload

        pubkeyReadPosition = 8  # bypass the nonce
        if payload[pubkeyReadPosition:pubkeyReadPosition + 4] == \
                '\x00\x00\x00\x00':  # if this pubkey uses 8 byte time
            pubkeyReadPosition += 8
        else:
            pubkeyReadPosition += 4
        addressVersion, addressVersionLength = decodeVarint(
            payload[pubkeyReadPosition:pubkeyReadPosition + 10])
        pubkeyReadPosition += addressVersionLength
        pubkeyStreamNumber = decodeVarint(
            payload[pubkeyReadPosition:pubkeyReadPosition + 10])[0]
        inventoryHash = calculateInventoryHash(payload)
        objectType = 1  # .. todo:: support v4 pubkeys
        TTL = 28 * 24 * 60 * 60
        Inventory()[inventoryHash] = (
            objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL, ''
        )
        with threads.printLock:
            print('broadcasting inv within API command disseminatePubkey with hash:', hexlify(inventoryHash))
        queues.invQueue.put((pubkeyStreamNumber, inventoryHash))

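The offset arithmetic above walks the object header: 8 bytes of nonce, then an expiration time that is 8 bytes when the first 4 bytes are zero (otherwise a legacy 4-byte time), then two varints for address version and stream number. A compact restatement of that walk, assuming the same decodeVarint helper:

    def pubkey_stream_number(payload):
        pos = 8                                   # skip the nonce
        pos += 8 if payload[pos:pos + 4] == b'\x00\x00\x00\x00' else 4  # time field
        version, version_len = decodeVarint(payload[pos:pos + 10])
        pos += version_len
        return decodeVarint(payload[pos:pos + 10])[0]  # stream number
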
    def HandleGetMessageDataByDestinationHash(self, params):
        """Handle a request to get message data by destination hash"""
        # Method will eventually be used by a particular Android app to
        # select relevant messages. Do not yet add this to the api
        # doc.
        if len(params) != 1:
            raise APIError(0, 'I need 1 parameter!')
        requestedHash, = params
        if len(requestedHash) != 64:
            raise APIError(
                19, 'The length of hash should be 32 bytes (encoded in hex'
                ' thus 64 characters).')
        requestedHash = self._decode(requestedHash, "hex")

        # This is not a particularly commonly used API function. Before we
        # use it we'll need to fill out a field in our inventory database
        # which is blank by default (first20bytesofencryptedmessage).
        queryreturn = sqlQuery(
            "SELECT hash, payload FROM inventory WHERE tag = ''"
            " and objecttype = 2")
        with SqlBulkExecute() as sql:
            for row in queryreturn:
                hash01, payload = row
                readPosition = 16  # Nonce length + time length
                # Stream Number length
                readPosition += decodeVarint(
                    payload[readPosition:readPosition + 10])[1]
                t = (payload[readPosition:readPosition + 32], hash01)
                sql.execute("UPDATE inventory SET tag=? WHERE hash=?", *t)

        queryreturn = sqlQuery(
            "SELECT payload FROM inventory WHERE tag = ?", requestedHash)
        data = '{"receivedMessageDatas":['
        for row in queryreturn:
            payload, = row
            if len(data) > 25:
                data += ','
            data += json.dumps(
                {'data': hexlify(payload)}, indent=4, separators=(',', ': '))
        data += ']}'
        return data

    def HandleClientStatus(self, params):
        """Handle a request to get the status of the client"""
        connections_num = len(network.stats.connectedHostsList())
        if connections_num == 0:
            networkStatus = 'notConnected'
        elif state.clientHasReceivedIncomingConnections:
            networkStatus = 'connectedAndReceivingIncomingConnections'
        else:
            networkStatus = 'connectedButHaveNotReceivedIncomingConnections'
        return json.dumps({
            'networkConnections': connections_num,
            'numberOfMessagesProcessed': state.numberOfMessagesProcessed,
            'numberOfBroadcastsProcessed': state.numberOfBroadcastsProcessed,
            'numberOfPubkeysProcessed': state.numberOfPubkeysProcessed,
            'networkStatus': networkStatus,
            'softwareName': 'PyBitmessage',
            'softwareVersion': softwareVersion
        }, indent=4, separators=(',', ': '))

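Illustrative output (all values invented for the example):

    {
        "networkConnections": 8,
        "numberOfMessagesProcessed": 123,
        "numberOfBroadcastsProcessed": 45,
        "numberOfPubkeysProcessed": 67,
        "networkStatus": "connectedAndReceivingIncomingConnections",
        "softwareName": "PyBitmessage",
        "softwareVersion": "0.0.0 (placeholder)"
    }
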
    def HandleDecodeAddress(self, params):
        """Handle a request to decode an address"""
        # Return a meaningful decoding of an address.
        if len(params) != 1:
            raise APIError(0, 'I need 1 parameter!')
        address, = params
        status, addressVersion, streamNumber, ripe = decodeAddress(address)
        return json.dumps({
            'status': status,
            'addressVersion': addressVersion,
            'streamNumber': streamNumber,
            'ripe': base64.b64encode(ripe)
        }, indent=4, separators=(',', ': '))

    def HandleHelloWorld(self, params):
        """Test two string params"""
        a, b = params
        return a + '-' + b

    def HandleAdd(self, params):
        """Test two numeric params"""
        a, b = params
        return a + b

    def HandleStatusBar(self, params):
        """Handle a request to update the status bar"""
        message, = params
        queues.UISignalQueue.put(('updateStatusBar', message))

    def HandleDeleteAndVacuum(self, params):
        """Handle a request to run the deleteandvacuume stored procedure"""
        if not params:
            sqlStoredProcedure('deleteandvacuume')
            return 'done'
        return None

    def HandleShutdown(self, params):
        """Handle a request to shut down the node"""
        if not params:
            # backward compatible trick because False == 0 is True
            state.shutdown = False
            return 'done'
        return None

    handlers = {}
    handlers['helloWorld'] = HandleHelloWorld
    handlers['add'] = HandleAdd
    handlers['statusBar'] = HandleStatusBar
    handlers['listAddresses'] = HandleListAddresses
    handlers['listAddressBookEntries'] = HandleListAddressBookEntries
    # the listAddressbook alias should be removed eventually.
    handlers['listAddressbook'] = HandleListAddressBookEntries
    handlers['addAddressBookEntry'] = HandleAddAddressBookEntry
    # the addAddressbook alias should be deleted eventually.
    handlers['addAddressbook'] = HandleAddAddressBookEntry
    handlers['deleteAddressBookEntry'] = HandleDeleteAddressBookEntry
    # The deleteAddressbook alias should be deleted eventually.
    handlers['deleteAddressbook'] = HandleDeleteAddressBookEntry
    handlers['createRandomAddress'] = HandleCreateRandomAddress
    handlers['createDeterministicAddresses'] = \
        HandleCreateDeterministicAddresses
    handlers['getDeterministicAddress'] = HandleGetDeterministicAddress
    handlers['createChan'] = HandleCreateChan
    handlers['joinChan'] = HandleJoinChan
    handlers['leaveChan'] = HandleLeaveChan
    handlers['deleteAddress'] = HandleDeleteAddress
    handlers['getAllInboxMessages'] = HandleGetAllInboxMessages
    handlers['getAllInboxMessageIds'] = HandleGetAllInboxMessageIds
    handlers['getAllInboxMessageIDs'] = HandleGetAllInboxMessageIds
    handlers['getInboxMessageById'] = HandleGetInboxMessageById
    handlers['getInboxMessageByID'] = HandleGetInboxMessageById
    handlers['getAllSentMessages'] = HandleGetAllSentMessages
    handlers['getAllSentMessageIds'] = HandleGetAllSentMessageIds
    handlers['getAllSentMessageIDs'] = HandleGetAllSentMessageIds
    handlers['getInboxMessagesByReceiver'] = HandleInboxMessagesByReceiver
    # after some time getInboxMessagesByAddress should be removed
    handlers['getInboxMessagesByAddress'] = HandleInboxMessagesByReceiver
    handlers['getSentMessageById'] = HandleGetSentMessageById
    handlers['getSentMessageByID'] = HandleGetSentMessageById
    handlers['getSentMessagesByAddress'] = HandleGetSentMessagesByAddress
    handlers['getSentMessagesBySender'] = HandleGetSentMessagesByAddress
    handlers['getSentMessageByAckData'] = HandleGetSentMessagesByAckData
    handlers['trashMessage'] = HandleTrashMessage
    handlers['trashInboxMessage'] = HandleTrashInboxMessage
    handlers['trashSentMessage'] = HandleTrashSentMessage
    handlers['trashSentMessageByAckData'] = HandleTrashSentMessageByAckData
    handlers['sendMessage'] = HandleSendMessage
    handlers['sendBroadcast'] = HandleSendBroadcast
    handlers['getStatus'] = HandleGetStatus
    handlers['addSubscription'] = HandleAddSubscription
    handlers['deleteSubscription'] = HandleDeleteSubscription
    handlers['listSubscriptions'] = ListSubscriptions
    handlers['disseminatePreEncryptedMsg'] = HandleDisseminatePreEncryptedMsg
    handlers['disseminatePubkey'] = HandleDisseminatePubKey
    handlers['getMessageDataByDestinationHash'] = \
        HandleGetMessageDataByDestinationHash
    handlers['getMessageDataByDestinationTag'] = \
        HandleGetMessageDataByDestinationHash
    handlers['clientStatus'] = HandleClientStatus
    handlers['decodeAddress'] = HandleDecodeAddress
    handlers['deleteAndVacuum'] = HandleDeleteAndVacuum
    handlers['shutdown'] = HandleShutdown

    def _handle_request(self, method, params):
        if method not in self.handlers:
            raise APIError(20, 'Invalid method: %s' % method)
        result = self.handlers[method](self, params)
        state.last_api_response = time.time()
        return result

    def _dispatch(self, method, params):
        # pylint: disable=attribute-defined-outside-init
        self.cookies = []

        validuser = self.APIAuthenticateClient()
        if not validuser:
            time.sleep(2)
            return (
                "RPC Username or password incorrect or HTTP header"
                " lacks authentication entirely.")

        try:
            return self._handle_request(method, params)
        except APIError as e:
            return str(e)
        except varintDecodeError as e:
            logger.error(e)
            return "API Error 0026: Data contains a malformed varint. Some details: %s" % e
        except Exception as e:
            logger.exception(e)
            return "API Error 0021: Unexpected API Failure - %s" % e

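Because dispatch is just a dict lookup followed by a call with (self, params), extending the API is a matter of adding one entry. A hedged sketch (the handler name and body are invented for illustration, not part of the real API):

    def HandlePing(self, params):
        """Trivial liveness check; hypothetical example only."""
        return 'pong'

    handlers['ping'] = HandlePing
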
@ -1,7 +1,6 @@
from turtle import pd
from pybitmessage.get_platform import platform
from pybitmessage import kivy_helper_search
from pybitmessage.helper_sql import sqlExecute
from functools import partial
from kivy.clock import Clock
from kivy.properties import (

@ -49,14 +48,8 @@ class AddressBook(Screen):
        what = state.searcing_text
        xAddress = ''
        self.ids.tag_label.text = ''
        # self.queryreturn = kivy_helper_search.search_sql(
        #     xAddress, account, "addressbook", where, what, False)
        # self.queryreturn = [obj for obj in reversed(self.queryreturn)]
        if self.queryreturn:
            self.ids.tag_label.text = 'Address Book'
            self.has_refreshed = True
            self.set_mdList(0, 20)
            self.ids.scroll_y.bind(scroll_y=self.check_scroll_y)
        else:
            content = MDLabel(
                font_style='Caption',

@ -68,42 +61,12 @@ class AddressBook(Screen):
                valign='top')
            self.ids.ml.add_widget(content)

    def set_mdList(self, start_index, end_index):
        """Creating the mdList"""
        for item in self.queryreturn[start_index:end_index]:
            message_row = SwipeToDeleteItem(
                text=item[0],
            )
            listItem = message_row.ids.content
            listItem.secondary_text = item[1]
            listItem.theme_text_color = "Custom"
            listItem.text_color = ThemeClsColor
            # listItem.add_widget(AvatarSampleWidget(
            #     source=state.imageDir + '/text_images/{}.png'.format(
            #         avatarImageFirstLetter(item[0].strip()))))
            image = state.imageDir + "/text_images/{}.png".format(
                avatarImageFirstLetter(item[0].strip()))
            message_row.ids.avater_img.source = image
            listItem.bind(on_release=partial(
                self.addBook_detail, item[1], item[0], message_row))
            message_row.ids.delete_msg.bind(on_press=partial(self.delete_address, item[1]))
            self.ids.ml.add_widget(message_row)

    def check_scroll_y(self, instance, somethingelse):
        """Load data on scroll"""
        if self.ids.scroll_y.scroll_y <= -0.0 and self.has_refreshed:
            self.ids.scroll_y.scroll_y = 0.06
            exist_addresses = len(self.ids.ml.children)
            if exist_addresses != len(self.queryreturn):
                self.update_addressBook_on_scroll(exist_addresses)
            self.has_refreshed = exist_addresses != len(self.queryreturn)

    def update_addressBook_on_scroll(self, exist_addresses):
        """Load more data on scroll down"""
        self.set_mdList(exist_addresses, exist_addresses + 5)

    @staticmethod
    def refreshs(*args):
        """Refresh the Widget"""

@ -151,8 +114,6 @@ class AddressBook(Screen):
        # if len(self.ids.ml.children) == 0:
        if self.ids.ml.children is not None:
            self.ids.tag_label.text = ''
        # sqlExecute(
        #     "DELETE FROM addressbook WHERE address = '{}';".format(address))
        toast('Address Deleted')

    def close_pop(self, instance):

@ -170,10 +131,6 @@ class AddressBook(Screen):
        if label in stored_labels and self.address == add_dict[label]:
            stored_labels.remove(label)
        if label and label not in stored_labels:
            # sqlExecute(
            #     "UPDATE addressbook SET label = '{}' WHERE"
            #     " address = '{}';".format(
            #         label, self.addbook_popup.content_cls.address))
            state.kivyapp.root.ids.sc11.ids.ml.clear_widgets()
            state.kivyapp.root.ids.sc11.loadAddresslist(None, 'All', '')
        self.addbook_popup.dismiss()

@ -1,5 +1,3 @@
from pybitmessage.bmconfigparser import BMConfigParser
from pybitmessage.helper_sql import sqlExecute, sqlQuery
from functools import partial
from kivy.clock import Clock
from kivy.properties import (

@ -200,18 +198,3 @@ class Allmails(Screen):
        nav_lay_obj.sc17.remove_widget(instance.parent.parent)
        toast('Deleted')

    def refresh_callback(self, *args):
        """Update the state of the application
        while the spinner remains on the screen."""
        def refresh_callback(interval):
            """Load the allmails screen data"""
            self.ids.ml.clear_widgets()
            self.remove_widget(self.children[1])
            try:
                screens_obj = self.parent.screens[16]
            except Exception:
                screens_obj = self.parent.parent.screens[16]
            screens_obj.add_widget(Allmails())
            self.ids.refresh_layout.refresh_done()
            self.tick = 0
        Clock.schedule_once(refresh_callback, 1)

@ -50,26 +50,6 @@ def chipTag(text):
    return obj


# def initailize_detail_page(manager):
#     if not manager.has_screen(
#             data_screens['MailDetail']["name_screen"]
#     ):
#         Builder.load_file(
#             os.path.join(
#                 # os.environ["KITCHEN_SINK_ROOT"],
#                 os.path.dirname(os.path.dirname(__file__)),
#                 "kv",
#                 "maildetail.kv",
#             )
#         )
#     if "Import" in data_screens['MailDetail']:
#         exec(data_screens['MailDetail']["Import"])
#     screen_object = eval(data_screens['MailDetail']["Factory"])
#     data_screens['MailDetail']["object"] = screen_object
#     manager.add_widget(screen_object)
#     manager.current = data_screens['MailDetail']["name_screen"]


def toast(text):
    """Display a toast message"""
    kivytoast.toast(text)

@ -1,8 +1,5 @@
import time

from pybitmessage import kivy_helper_search
from pybitmessage.bmconfigparser import BMConfigParser
from pybitmessage.helper_sql import sqlExecute
from functools import partial
from pybitmessage.addresses import decodeAddress
from kivy.clock import Clock

@ -144,7 +141,6 @@ class Draft(Screen):

    def delete_draft(self, data_index, instance, *args):
        """Delete draft message permanently"""
        sqlExecute("DELETE FROM sent WHERE ackdata = ?;", data_index)
        if int(state.draft_count) > 0:
            state.draft_count = str(int(state.draft_count) - 1)
            self.set_draftCnt(state.draft_count)

@ -170,26 +166,6 @@ class Draft(Screen):
        toAddress = addBMIfNotPresent(toAddress)
        stealthLevel = BMConfigParser().safeGetInt(
            'bitmessagesettings', 'ackstealthlevel')
        # from helper_ackPayload import genAckPayload
        # ackdata = genAckPayload(streamNumber, stealthLevel)
        # sqlExecute(
        #     '''INSERT INTO sent VALUES
        #     (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
        #     '',
        #     toAddress,
        #     ripe,
        #     fromAddress,
        #     subject,
        #     message,
        #     ackdata,
        #     int(time.time()),
        #     int(time.time()),
        #     0,
        #     'msgqueued',
        #     0,
        #     'draft',
        #     encoding,
        #     BMConfigParser().safeGetInt('bitmessagesettings', 'ttl'))
        state.msg_counter_objs = src_object.children[2].children[0].ids
        state.draft_count = str(int(state.draft_count) + 1) \
            if state.association == fromAddress else state.draft_count

@ -1,7 +1,4 @@
from pybitmessage.helper_sql import sqlExecute
from functools import partial
from kivy.clock import Clock
from kivy.metrics import dp
from kivy.properties import (
    ListProperty,
    StringProperty

@ -11,11 +8,7 @@ from kivymd.uix.label import MDLabel

from pybitmessage import state

from pybitmessage.baseclass.common import (
    showLimitedCnt, avatarImageFirstLetter,
    ThemeClsColor, toast, SwipeToDeleteItem,
    ShowTimeHistoy
)
from pybitmessage.baseclass.common import showLimitedCnt


class Inbox(Screen):

@ -52,33 +45,8 @@ class Inbox(Screen):
        xAddress = "toaddress"
        data = []
        self.ids.tag_label.text = ""
        self.inboxDataQuery(xAddress, where, what)
        self.ids.tag_label.text = ""
        if self.queryreturn:
            self.ids.tag_label.text = "Inbox"
            state.kivyapp.get_inbox_count()
            self.set_inboxCount(state.inbox_count)
            for mail in self.queryreturn:
                body = mail[3].decode() if isinstance(mail[3], bytes) else mail[3]
                subject = mail[5].decode() if isinstance(mail[5], bytes) else mail[5]
                data.append(
                    {
                        "text": mail[4].strip(),
                        "secondary_text": (
                            subject[:50] + "........"
                            if len(subject) >= 50
                            else (subject + "," + body)[0:50] + "........"
                        )
                        .replace("\t", "")
                        .replace(" ", ""),
                        "msgid": mail[1],
                        "received": mail[6]
                    }
                )

            self.has_refreshed = True
            self.set_mdList(data)
            self.ids.scroll_y.bind(scroll_y=self.check_scroll_y)
        else:
            self.set_inboxCount("0")
            content = MDLabel(

@ -102,125 +70,9 @@ class Inbox(Screen):
            int(state.sent_count) + int(state.inbox_count))
        src_mng_obj.allmail_cnt.ids.badge_txt.text = showLimitedCnt(int(state.all_count))

    def inboxDataQuery(self, xAddress, where, what, start_indx=0, end_indx=20):
        """Retrieve inbox data"""
        pass

    def set_mdList(self, data):
        """Create the mdList"""
        total_message = len(self.ids.ml.children)
        for item in data:
            message_row = SwipeToDeleteItem(
                text=item["text"],
            )
            listItem = message_row.ids.content
            listItem.secondary_text = item["secondary_text"]
            listItem.theme_text_color = "Custom"
            listItem.text_color = ThemeClsColor
            listItem._txt_right_pad = dp(70)
            image = state.imageDir + "/text_images/{}.png".format(
                avatarImageFirstLetter(item["secondary_text"].strip()))
            message_row.ids.avater_img.source = image
            listItem.bind(on_release=partial(self.inbox_detail, item["msgid"], message_row))
            message_row.ids.time_tag.text = str(ShowTimeHistoy(item["received"]))
            message_row.ids.delete_msg.bind(on_press=partial(self.delete, item["msgid"]))
            self.ids.ml.add_widget(message_row)
        update_message = len(self.ids.ml.children)
        self.has_refreshed = total_message != update_message

    def check_scroll_y(self, instance, somethingelse):
        """Load data on scroll"""
        if self.ids.scroll_y.scroll_y <= -0.0 and self.has_refreshed:
            self.ids.scroll_y.scroll_y = 0.06
            total_message = len(self.ids.ml.children)
            self.update_inbox_screen_on_scroll(total_message)

    def update_inbox_screen_on_scroll(self, total_message, where="", what=""):
        """Load more data on scroll down"""
        data = []
        if state.searcing_text:
            where = ["subject", "message"]
            what = state.searcing_text
        self.inboxDataQuery("toaddress", where, what, total_message, 5)
        for mail in self.queryreturn:
            subject = mail[3].decode() if isinstance(mail[3], bytes) else mail[3]
            body = mail[5].decode() if isinstance(mail[5], bytes) else mail[5]
            data.append(
                {
                    "text": mail[4].strip(),
                    "secondary_text": body[:50] + "........"
                    if len(body) >= 50
                    else (body + "," + subject.replace("\n", ""))[0:50] + "........",
                    "msgid": mail[1],
                    "received": mail[6]
                }
            )
        self.set_mdList(data)

    def inbox_detail(self, msg_id, instance, *args):
        """Load inbox page details"""
        if instance.state == 'closed':
            instance.ids.delete_msg.disabled = True
            if instance.open_progress == 0.0:
                state.detailPageType = "inbox"
                state.mail_id = msg_id
                if self.manager:
                    src_mng_obj = self.manager
                else:
                    src_mng_obj = self.parent.parent
                src_mng_obj.screens[11].clear_widgets()
                # src_mng_obj.screens[11].add_widget(MailDetail())
                src_mng_obj.current = "mailDetail"
        else:
            instance.ids.delete_msg.disabled = False

    def delete(self, data_index, instance, *args):
        """Delete inbox mail from the inbox listing"""
        sqlExecute("UPDATE inbox SET folder = 'trash' WHERE msgid = ?;", data_index)
        msg_count_objs = self.parent.parent.ids.content_drawer.ids
        if int(state.inbox_count) > 0:
            msg_count_objs.inbox_cnt.ids.badge_txt.text = showLimitedCnt(
                int(state.inbox_count) - 1
            )
            msg_count_objs.trash_cnt.ids.badge_txt.text = showLimitedCnt(
                int(state.trash_count) + 1
            )
            state.inbox_count = str(int(state.inbox_count) - 1)
            state.trash_count = str(int(state.trash_count) + 1)
            if int(state.all_count) > 0:
                msg_count_objs.allmail_cnt.ids.badge_txt.text = showLimitedCnt(
                    int(state.all_count) - 1
                )
                state.all_count = str(int(state.all_count) - 1)

        if int(state.inbox_count) <= 0:
            self.ids.tag_label.text = ''
        self.ids.ml.remove_widget(
            instance.parent.parent)
        toast('Deleted')

    def archive(self, data_index, instance, *args):
        """Archive inbox mail from the inbox listing"""
        # sqlExecute("UPDATE inbox SET folder = 'trash' WHERE msgid = ?;", data_index)
        self.ids.ml.remove_widget(instance.parent.parent)
        self.update_trash()

    def update_trash(self):
        """Update the trash screen with mails deleted from the inbox"""
        self.manager.parent.ids.sc5.clear_widgets()

    def refresh_callback(self, *args):
        """Update the state of the application
        while the spinner remains on the screen"""

        def refresh_callback(interval):
            """Load the inbox screen data"""
            state.searcing_text = ""
            self.children[2].children[1].ids.search_field.text = ""
            self.ids.ml.clear_widgets()
            self.loadMessagelist(state.association)
            self.has_refreshed = True
            self.ids.refresh_layout.refresh_done()
            self.tick = 0

        Clock.schedule_once(refresh_callback, 1)

@ -2,7 +2,6 @@ from datetime import datetime

# from pybitmessage.get_platform import platform
platform = "linux"
from pybitmessage.helper_sql import sqlExecute, sqlQuery

from kivy.core.clipboard import Clipboard
from kivy.clock import Clock

@ -100,17 +99,13 @@ class MailDetail(Screen):  # pylint: disable=too-many-instance-attributes
        self.page_type = state.detailPageType if state.detailPageType else ''
        try:
            if state.detailPageType == 'sent' or state.detailPageType == 'draft':
                data = sqlQuery(
                    "select toaddress, fromaddress, subject, message, status,"
                    " ackdata, senttime from sent where ackdata = ?;", state.mail_id)
                data = []
                state.status = self
                state.ackdata = data[0][5]
                self.assign_mail_details(data)
                state.kivyapp.set_mail_detail_header()
            elif state.detailPageType == 'inbox':
                data = sqlQuery(
                    "select toaddress, fromaddress, subject, message, received from inbox"
                    " where msgid = ?;", state.mail_id)
                data = []
                self.assign_mail_details(data)
                state.kivyapp.set_mail_detail_header()
        except Exception as e:

@ -140,18 +135,12 @@ class MailDetail(Screen):  # pylint: disable=too-many-instance-attributes
        self.children[0].children[0].active = True
        if state.detailPageType == 'sent':
            state.kivyapp.root.ids.sc4.ids.sent_search.ids.search_field.text = ''
            sqlExecute(
                "UPDATE sent SET folder = 'trash' WHERE"
                " ackdata = ?;", state.mail_id)
            msg_count_objs.send_cnt.ids.badge_txt.text = str(int(state.sent_count) - 1)
            state.sent_count = str(int(state.sent_count) - 1)
            self.parent.screens[2].ids.ml.clear_widgets()
            self.parent.screens[2].loadSent(state.association)
        elif state.detailPageType == 'inbox':
            state.kivyapp.root.ids.sc1.ids.inbox_search.ids.search_field.text = ''
            sqlExecute(
                "UPDATE inbox SET folder = 'trash' WHERE"
                " msgid = ?;", state.mail_id)
            msg_count_objs.inbox_cnt.ids.badge_txt.text = str(
                int(state.inbox_count) - 1)
            state.inbox_count = str(int(state.inbox_count) - 1)

@ -159,7 +148,6 @@ class MailDetail(Screen):  # pylint: disable=too-many-instance-attributes
            self.parent.screens[0].loadMessagelist(state.association)

        elif state.detailPageType == 'draft':
            sqlExecute("DELETE FROM sent WHERE ackdata = ?;", state.mail_id)
            msg_count_objs.draft_cnt.ids.badge_txt.text = str(
                int(state.draft_count) - 1)
            state.draft_count = str(int(state.draft_count) - 1)

@ -2,7 +2,6 @@ import time

from pybitmessage.get_platform import platform
from pybitmessage.bmconfigparser import BMConfigParser
from pybitmessage.helper_sql import sqlExecute, sqlQuery
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.factory import Factory

@ -93,25 +92,9 @@ class DropDownWidget(BoxLayout):
        if status == "success":
            navApp.root.ids.sc3.children[0].active = True
            if state.detailPageType == "draft" and state.send_draft_mail:
                # sqlExecute(
                #     "UPDATE sent SET toaddress = ?"
                #     ", fromaddress = ? , subject = ?"
                #     ", message = ?, folder = 'sent'"
                #     ", senttime = ?, lastactiontime = ?"
                #     " WHERE ackdata = ?;",
                #     toAddress,
                #     fromAddress,
                #     subject,
                #     message,
                #     int(time.time()),
                #     int(time.time()),
                #     state.send_draft_mail)
                self.parent.parent.screens[13].clear_widgets()
                self.parent.parent.screens[13].add_widget(Factory.Draft())
                # state.detailPageType = ''
                # state.send_draft_mail = None
            else:
                # toAddress = addBMIfNotPresent(toAddress)
                if (addressVersionNumber > 4) or (
                        addressVersionNumber <= 1):
                    print(

@ -121,36 +104,11 @@ class DropDownWidget(BoxLayout):
                    print("streamNumber > 1 or streamNumber == 0")
                stealthLevel = BMConfigParser().safeGetInt(
                    'bitmessagesettings', 'ackstealthlevel')
                # from helper_ackPayload import genAckPayload
                # ackdata = genAckPayload(streamNumber, stealthLevel)
                # t = ()
                # sqlExecute(
                #     '''INSERT INTO sent VALUES
                #     (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
                #     '',
                #     addBMIfNotPresent(toAddress),
                #     ripe,
                #     fromAddress,
                #     subject,
                #     message,
                #     genAckPayload(streamNumber, stealthLevel),  # ackdata
                #     int(time.time()),
                #     int(time.time()),
                #     0,
                #     'msgqueued',
                #     0,
                #     'sent',
                #     3,  # encoding
                #     BMConfigParser().safeGetInt(
                #         'bitmessagesettings', 'ttl'))

                state.check_sent_acc = fromAddress
                # state.msg_counter_objs = self.parent.parent.parent.parent\
                #     .parent.parent.children[2].children[0].ids
                if state.detailPageType == 'draft' \
                        and state.send_draft_mail:
                    state.draft_count = str(int(state.draft_count) - 1)
                    # state.msg_counter_objs.draft_cnt.badge_text = (
                    #     state.draft_count)
                    state.detailPageType = ''
                    state.send_draft_mail = None
                self.parent.parent.parent.ids.sc4.update_sent_messagelist()

@ -237,23 +195,8 @@ class DropDownWidget(BoxLayout):
        """Callback of alert box"""
        dialog_box.dismiss()
        toast(text_item)


# class HoverItem(MDBoxLayout, ThemableBehavior, HoverBehavior):
#     '''Custom item implementing hover behavior.'''
#     def __init__(self, **kwargs):
#         """Getting Text Input."""
#         super(HoverItem, self).__init__(**kwargs)

#     def on_enter(self):
#         Window.set_system_cursor('hand')

#     def on_leave(self):
#         Window.set_system_cursor('arrow')


class MyTextInput(MDTextField):
    """MyTextInput class for kivy Ui"""

@ -266,8 +209,6 @@ class MyTextInput(MDTextField):
    def __init__(self, **kwargs):
        """Getting Text Input."""
        super(MyTextInput, self).__init__(**kwargs)
        self.__lineBreak__ = 0

    def on_text(self, instance, value):  # pylint: disable=unused-argument

@ -174,11 +174,6 @@ class MyAddress(Screen):
        dialog_box.dismiss()
        toast(text_item)

    # @staticmethod
    # def callback_for_menu_items(text_item, *arg):
    #     """Callback of alert box"""
    #     toast(text_item)

    def refresh_callback(self, *args):
        """Update the state of the application
        while the spinner remains on the screen"""

@ -4,9 +4,6 @@ from kivy.clock import Clock
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen

# from pybitmessage.network import objectracker, stats


class NetworkStat(Screen):
    """NetworkStat class for kivy Ui"""

@ -1,8 +1,5 @@
# from pybitmessage import identiconGeneration
from pybitmessage import kivy_helper_search
from pybitmessage.bmconfigparser import BMConfigParser
from functools import partial
from pybitmessage.helper_sql import sqlExecute
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.properties import StringProperty, ListProperty

@ -207,17 +204,11 @@ class Sent(Screen):
            state.all_count = str(int(state.all_count) - 1)
        if int(state.sent_count) <= 0:
            self.ids.tag_label.text = ''
        sqlExecute(
            "UPDATE sent SET folder = 'trash'"
            " WHERE ackdata = ?;", data_index)
        self.ids.ml.remove_widget(instance.parent.parent)
        toast('Deleted')

    def archive(self, data_index, instance, *args):
        """Archive sent mail from sent mail listing"""
        sqlExecute(
            "UPDATE sent SET folder = 'trash'"
            " WHERE ackdata = ?;", data_index)
        self.ids.ml.remove_widget(instance.parent.parent)
        self.update_trash()

@ -1,6 +1,4 @@
from pybitmessage.get_platform import platform
# from pybitmessage.bmconfigparser import BMConfigParser
from pybitmessage.helper_sql import sqlExecute, sqlQuery
from functools import partial
from kivy.clock import Clock
from kivy.properties import (

@ -62,17 +60,7 @@ class Trash(Screen):

    def trashDataQuery(self, start_indx, end_indx):
        """Trash message query"""
        self.trash_messages = sqlQuery(
            "SELECT toaddress, fromaddress, subject, message,"
            " folder ||',' || 'sent' as folder, ackdata As"
            " id, DATE(senttime) As actionTime, senttime as msgtime FROM sent"
            " WHERE folder = 'trash' and fromaddress = '{0}' UNION"
            " SELECT toaddress, fromaddress, subject, message,"
            " folder ||',' || 'inbox' as folder, msgid As id,"
            " DATE(received) As actionTime, received as msgtime FROM inbox"
            " WHERE folder = 'trash' and toaddress = '{0}'"
            " ORDER BY actionTime DESC limit {1}, {2}".format(
                state.association, start_indx, end_indx))
        self.trash_messages = []

    def set_TrashCnt(self, Count):  # pylint: disable=no-self-use
        """This method is used to set trash message count"""

@ -159,25 +147,10 @@ class Trash(Screen):
        toast(text_item)
        dialog_box.dismiss()

    # def callback_for_delete_msg(self, text_item, *arg):
    #     """Getting the callback of alert box"""
    #     if text_item == 'Yes':
    #         self.delete_message_from_trash()
    #     else:
    #         toast(text_item)

    def delete_message_from_trash(self):
        """Delete a message from the trash"""
        self.children[1].active = True
        if self.table_name == 'inbox':
            sqlExecute(
                "DELETE FROM inbox WHERE msgid = ?;", self.delete_index)
        elif self.table_name == 'sent':
            sqlExecute(
                "DELETE FROM sent WHERE ackdata = ?;", self.delete_index)
        if int(state.trash_count) > 0:
            # msg_count_objs.trash_cnt.badge_text = str(
            #     int(state.trash_count) - 1)
            self.set_TrashCnt(int(state.trash_count) - 1)
            state.trash_count = str(int(state.trash_count) - 1)
        Clock.schedule_once(self.callback_for_screen_load, 1)

@ -58,7 +58,11 @@ class addressGenerator(StoppableThread):
        queueValue = queues.addressGeneratorQueue.get()
        try:
            address = self.address_list.pop(0)
            label = queueValue[3]
            print("queueValue: ", queueValue)
            if len(queueValue) >= 3:
                label = queueValue[3]
            else:
                label = ''

            BMConfigParser().add_section(address)
            BMConfigParser().set(address, 'label', label)

@ -1,30 +0,0 @@
"""
The objectProcessor thread, of which there is only one,
processes the network objects
"""
import logging
import random
import threading

from pybitmessage import state

logger = logging.getLogger('default')


class objectProcessor(threading.Thread):
    """
    The objectProcessor thread, of which there is only one, receives network
    objects (msg, broadcast, pubkey, getpubkey) from the receiveDataThreads.
    """
    def __init__(self):
        threading.Thread.__init__(self, name="objectProcessor")
        random.seed()
        self.successfullyDecryptMessageTimings = []

    def run(self):
        """Process the objects from `.queues.objectProcessorQueue`"""
        while True:
            # pylint: disable=unused-variable
            if state.shutdown:
                state.shutdown = 2
                break

@ -1,161 +0,0 @@
"""
The `singleCleaner` class is a timer-driven thread that cleans data structures
to free memory, resends messages when a remote node doesn't respond, and
sends pong messages to keep connections alive if the network isn't busy.

It cleans these data structures in memory:
- inventory (moves data to the on-disk sql database)
- inventorySets (clears then reloads data out of sql database)

It cleans these tables on the disk:
- inventory (clears expired objects)
- pubkeys (clears pubkeys older than 4 weeks old which we have not used
  personally)
- knownNodes (clears addresses which have not been online for over 3 days)

It resends messages when there has been no response:
- resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
- resends msg messages in 5 days (then 10 days, then 20 days, etc...)

"""
# pylint: disable=relative-import, protected-access
import gc
import os
from datetime import datetime, timedelta
import time

# import knownnodes
from pybitmessage import queues
from pybitmessage import state
# import tr
from pybitmessage.bmconfigparser import BMConfigParser
from pybitmessage.helper_sql import sqlExecute, sqlQuery
from pybitmessage.inventory import Inventory
# from network.connectionpool import BMConnectionPool
from pybitmessage.network.threads import StoppableThread


#: Equals 4 weeks. You could make this longer if you want
#: but making it shorter would not be advisable because
#: there is a very small possibility that it could keep you
#: from obtaining a needed pubkey for a period of time.
lengthOfTimeToHoldOnToAllPubkeys = 2419200


class singleCleaner(StoppableThread):
    """The singleCleaner thread class"""
    name = "singleCleaner"
    cycleLength = 300
    expireDiscoveredPeers = 300

    def run(self):  # pylint: disable=too-many-branches
        gc.disable()
        timeWeLastClearedInventoryAndPubkeysTables = 0
        try:
            state.maximumLengthOfTimeToBotherResendingMessages = (
                float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxdays'))
                * 24 * 60 * 60
            ) + (
                float(BMConfigParser().get(
                    'bitmessagesettings', 'stopresendingafterxmonths'))
                * (60 * 60 * 24 * 365) / 12)
        except:
            # Either the user hasn't set stopresendingafterxdays and
            # stopresendingafterxmonths yet or the options are missing
            # from the config file.
            state.maximumLengthOfTimeToBotherResendingMessages = float('inf')

        # initial wait
        if state.shutdown == 0:
            self.stop.wait(singleCleaner.cycleLength)

        while state.shutdown == 0:
            queues.UISignalQueue.put((
                'updateStatusBar',
                'Doing housekeeping (Flushing inventory in memory to disk...)'
            ))
            # Inventory().flush()
            queues.UISignalQueue.put(('updateStatusBar', ''))

            # If we are running as a daemon then we are going to fill up the UI
            # queue which will never be handled by a UI. We should clear it to
            # save memory.
            # FIXME redundant?
            # if state.thisapp.daemon or not state.enableGUI:
            #     queues.UISignalQueue.queue.clear()
            if timeWeLastClearedInventoryAndPubkeysTables < \
                    int(time.time()) - 7380:
                timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
                queues.workerQueue.put(('sendOnionPeerObj', ''))
            queryreturn = []
            for row in queryreturn:
                if len(row) < 2:
                    self.logger.error(
                        'Something went wrong in the singleCleaner thread:'
                        ' a query did not return the requested fields. %r',
                        row
                    )
                    self.stop.wait(3)
                    break
                toAddress, ackData, status = row
                if status == 'awaitingpubkey':
                    self.resendPubkeyRequest(toAddress)
                elif status == 'msgsent':
                    self.resendMsg(ackData)
            deleteTrashMsgPermanently()
            # discovery tracking
            exp = time.time() - singleCleaner.expireDiscoveredPeers
            reaper = (k for k, v in state.discoveredPeers.items() if v < exp)
            for k in reaper:
                try:
                    del state.discoveredPeers[k]
                except KeyError:
                    pass

            # ..todo:: cleanup pending upload / download

            gc.collect()

            if state.shutdown == 0:
                self.stop.wait(singleCleaner.cycleLength)

    def resendPubkeyRequest(self, address):
        """Resend pubkey request for address"""
        self.logger.debug(
            'It has been a long time and we haven\'t heard a response to our'
            ' getpubkey request. Sending again.'
        )
        try:
            # We need to take this entry out of the neededPubkeys structure
            # because the queues.workerQueue checks to see whether the entry
            # is already present and will not do the POW and send the message
            # because it assumes that it has already done it recently.
            del state.neededPubkeys[address]
        except:
            pass

        queues.UISignalQueue.put((
            'updateStatusBar',
            'Doing work necessary to again attempt to request a public key...'
        ))
        queues.workerQueue.put(('sendmessage', ''))

    def resendMsg(self, ackdata):
        """Resend message by ackdata"""
        self.logger.debug(
            'It has been a long time and we haven\'t heard an acknowledgement'
            ' to our msg. Sending again.'
        )
        queues.workerQueue.put(('sendmessage', ''))
        queues.UISignalQueue.put((
            'updateStatusBar',
            'Doing work necessary to again attempt to deliver a message...'
        ))


def deleteTrashMsgPermanently():
    """Delete old messages from the trash"""
    ndays_before_time = datetime.now() - timedelta(days=30)
    old_messages = time.mktime(ndays_before_time.timetuple())
    return

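The resend schedule described in the module docstring above (5 days, then 10, then 20, ...) is a doubling backoff, cut off once maximumLengthOfTimeToBotherResendingMessages is exceeded. A minimal sketch of that schedule; the function name is illustrative, not taken from this file:

    def next_resend_delay(retry_number, base_days=5):
        # 5 days for the first retry, doubling on each subsequent one:
        # 5, 10, 20, 40, ... days, expressed in seconds.
        return base_days * (2 ** retry_number) * 24 * 60 * 60
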
@ -1,44 +0,0 @@
"""
Thread for performing PoW
"""

from __future__ import division

from six.moves import queue

from pybitmessage import state
from pybitmessage import queues
from pybitmessage.network.threads import StoppableThread


class singleWorker(StoppableThread):
    """Thread for performing PoW"""

    def __init__(self):
        super(singleWorker, self).__init__(name="singleWorker")
        self.busy = None

    def stopThread(self):
        """Signal through the queue that the thread should be stopped"""
        try:
            queues.workerQueue.put(("stopThread", "data"))
        except queue.Full:
            self.logger.error('workerQueue is Full')
        super(singleWorker, self).stopThread()

    def run(self):
        """Run the single worker thread"""
        if state.shutdown > 0:
            return

        while state.shutdown == 0:
            self.busy = 0
            command, _ = queues.workerQueue.get()
            self.busy = 1
            if command == 'stopThread':
                self.busy = 0
                return

            queues.workerQueue.task_done()
        self.logger.info("Quitting...")

@ -1,72 +0,0 @@
"""Convenience functions for random operations. Not suitable for security / cryptography operations."""

import os
import random

# from pyelliptic.openssl import OpenSSL

NoneType = type(None)


def seed():
    """Initialize the random number generator"""
    random.seed()


def randomBytes(n):
    """Return n random bytes"""
    try:
        return os.urandom(n)
    except NotImplementedError:
        # return OpenSSL.rand(n)
        pass


def randomshuffle(population):
    """Shuffle the sequence in place.

    Shuffle alters its input in place, so the input must be a mutable
    sequence. In contrast, sample produces a new list and its input can
    be much more varied (tuple, string, xrange, bytearray, set, etc.).
    """
    random.shuffle(population)


def randomsample(population, k):
    """Return a k-length list of unique elements chosen from the
    population sequence.

    Used for random sampling without replacement; also called a
    partial shuffle.
    """
    return random.sample(population, k)


def randomrandrange(x, y=None):
    """Return a randomly selected element from range(start, stop).

    This is equivalent to choice(range(start, stop)), but doesn't
    actually build a range object.
    """
    if isinstance(y, NoneType):
        return random.randrange(x)  # nosec
    return random.randrange(x, y)  # nosec


def randomchoice(population):
    """Return a random element from the non-empty sequence seq.

    If seq is empty, raises IndexError.
    """
    return random.choice(population)  # nosec

@ -1,124 +0,0 @@
"""
SQL-related functions defined here really just pass the queries (or other SQL
commands) to :class:`.threads.sqlThread` through the `sqlSubmitQueue` queue
and check or return the result got from `sqlReturnQueue`.

This is done that way because :mod:`sqlite3` is so thread-unsafe that they
won't even let you call it from different threads using your own locks.
SQLite objects can only be used from one thread.

.. note:: This actually only applies for certain deployments, and/or
   really old versions of sqlite. I haven't actually seen it anywhere.
   Current versions do have support for threading and multiprocessing.
   I don't see an urgent reason to refactor this, but it should be noted
   in the comment that the problem is mostly not valid. Sadly, last time
   I checked, there is no reliable way to check whether the library is
   or isn't thread-safe.
"""

import threading
import queue as Queue

sqlSubmitQueue = Queue.Queue()
"""the queue for SQL"""
sqlReturnQueue = Queue.Queue()
"""the queue for results"""
sqlLock = threading.Lock()


def sqlQuery(sqlStatement, *args):
    """
    Query sqlite and return results

    :param str sqlStatement: SQL statement string
    :param list args: SQL query parameters
    :rtype: list
    """
    sqlLock.acquire()
    sqlSubmitQueue.put(sqlStatement)

    if args == ():
        sqlSubmitQueue.put('')
    elif isinstance(args[0], (list, tuple)):
        sqlSubmitQueue.put(args[0])
    else:
        sqlSubmitQueue.put(args)
    queryreturn, _ = sqlReturnQueue.get()
    sqlLock.release()

    return queryreturn


def sqlExecuteChunked(sqlStatement, idCount, *args):
    """Execute chunked SQL statement to avoid argument limit"""
    # SQLITE_MAX_VARIABLE_NUMBER,
    # unfortunately getting/setting isn't exposed to python
    sqlExecuteChunked.chunkSize = 999

    if idCount == 0 or idCount > len(args):
        return 0

    totalRowCount = 0
    with sqlLock:
        for i in range(
                len(args) - idCount, len(args),
                sqlExecuteChunked.chunkSize - (len(args) - idCount)
        ):
            chunk_slice = args[
                i:i + sqlExecuteChunked.chunkSize - (len(args) - idCount)
            ]
            sqlSubmitQueue.put(
                sqlStatement.format(','.join('?' * len(chunk_slice)))
            )
            # first static args, and then iterative chunk
            sqlSubmitQueue.put(
                args[0:len(args) - idCount] + chunk_slice
            )
            retVal = sqlReturnQueue.get()
            totalRowCount += retVal[1]
        sqlSubmitQueue.put('commit')
    return totalRowCount


def sqlExecute(sqlStatement, *args):
    """Execute SQL statement (optionally with arguments)"""
    sqlLock.acquire()
    sqlSubmitQueue.put(sqlStatement)

    if args == ():
        sqlSubmitQueue.put('')
    else:
        sqlSubmitQueue.put(args)
    _, rowcount = sqlReturnQueue.get()
    sqlSubmitQueue.put('commit')
    sqlLock.release()
    return rowcount


def sqlStoredProcedure(procName):
    """Schedule procName to be run"""
    sqlLock.acquire()
    sqlSubmitQueue.put(procName)
    sqlLock.release()


class SqlBulkExecute(object):  # pylint: disable=no-init
    """This is used when you have to execute the same statement in a cycle."""

    def __enter__(self):
        sqlLock.acquire()
        return self

    def __exit__(self, exc_type, value, traceback):
        sqlSubmitQueue.put('commit')
        sqlLock.release()

    @staticmethod
    def execute(sqlStatement, *args):
        """Used for statements that do not return results."""
        sqlSubmitQueue.put(sqlStatement)

        if args == ():
            sqlSubmitQueue.put('')
        else:
            sqlSubmitQueue.put(args)
        sqlReturnQueue.get()

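A usage sketch tying these helpers together; the table and columns are illustrative, though sent/ackdata mirror queries used elsewhere in this commit:

    from binascii import hexlify

    rows = sqlQuery("SELECT ackdata, status FROM sent WHERE folder = ?", 'sent')
    for ackdata, status in rows:
        print(hexlify(ackdata), status)

    # The same statement executed repeatedly under a single lock and commit:
    with SqlBulkExecute() as sql:
        for ackdata, status in rows:
            sql.execute(
                "UPDATE sent SET status = ? WHERE ackdata = ?",
                status, ackdata)
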
@ -1,7 +1,6 @@
"""
Sql queries for bitmessagekivy
"""
from pybitmessage.helper_sql import sqlQuery


def search_sql(

@ -69,6 +68,4 @@ def search_sql(
    sqlStatementBase += \
        "ORDER BY received DESC limit {0}, {1}".format(
            start_indx, end_indx)
    # elif folder == "addressbook":
    #     sqlStatementBase += " limit {0}, {1}".format(start_indx, end_indx)
    return []  # sqlQuery(sqlStatementBase, sqlArguments)
    return []

@ -17,7 +17,6 @@ from pybitmessage.uikivysignaler import UIkivySignaler
|
|||
from pybitmessage.bmconfigparser import BMConfigParser
|
||||
# from debug import logger
|
||||
from functools import partial
|
||||
from pybitmessage.helper_sql import sqlExecute, sqlQuery
|
||||
from kivymd.app import MDApp
|
||||
from kivy.clock import Clock
|
||||
from kivy.core.clipboard import Clipboard
|
||||
|
@@ -394,10 +393,8 @@ class NavigateApp(MDApp):
                folder="addressbook")]
        if label and address and address not in stored_address \
                and label not in stored_labels and pupup_obj.valid:
            # state.navinstance = self.parent.children[1]
            queues.UISignalQueue.put(('rerenderAddressBook', ''))
            self.add_popup.dismiss()
            sqlExecute("INSERT INTO addressbook VALUES(?,?)", label, address)
            try:
                rootIds = self.root.ids
            except Exception as e:
@@ -647,58 +644,21 @@ class NavigateApp(MDApp):
    @staticmethod
    def get_inbox_count():
        """Getting inbox count"""
        # state.inbox_count = str(sqlQuery(
        #     "SELECT COUNT(*) FROM inbox WHERE toaddress = '{}' and"
        #     " folder = 'inbox' ;".format(state.association))[0][0])
        pass

    @staticmethod
    def get_sent_count():
        """Getting sent count"""
        # state.sent_count = str(sqlQuery(
        #     "SELECT COUNT(*) FROM sent WHERE fromaddress = '{}' and"
        #     " folder = 'sent' ;".format(state.association))[0][0])
        pass

    def set_message_count(self):
        """Setting message count"""
        # msg_counter_objs = state.kivyapp.root.children[0].children[0].ids
        # try:
        #     msg_counter_objs = (
        #         self.root_window.children[0].children[2].children[0].ids)
        # except Exception:
        #     msg_counter_objs = (
        #         self.root_window.children[2].children[2].children[0].ids)
        # self.get_inbox_count()
        # self.get_sent_count()
        # state.trash_count = str(sqlQuery(
        #     "SELECT (SELECT count(*) FROM sent"
        #     " where fromaddress = '{0}' and folder = 'trash' )"
        #     "+(SELECT count(*) FROM inbox where toaddress = '{0}' and"
        #     " folder = 'trash') AS SumCount".format(state.association))[0][0])
        # state.draft_count = str(sqlQuery(
        #     "SELECT COUNT(*) FROM sent WHERE fromaddress = '{}' and"
        #     " folder = 'draft' ;".format(state.association))[0][0])
        # state.all_count = str(int(state.sent_count) + int(state.inbox_count))
        pass
        # if msg_counter_objs:
        #     msg_counter_objs.send_cnt.badge_text = state.sent_count
        #     msg_counter_objs.inbox_cnt.badge_text = state.inbox_count
        #     msg_counter_objs.trash_cnt.badge_text = state.trash_count
        #     msg_counter_objs.draft_cnt.badge_text = state.draft_count
        #     msg_counter_objs.allmail_cnt.badge_text = state.all_count

    def on_start(self):
        """Setting message count"""
        self.set_message_count()

    # @staticmethod
    # def on_stop():
    #     """The on_stop method is used for stopping the running script"""
    #     print("*******************EXITING FROM APPLICATION*******************")
    #     import shutdown
    #     shutdown.doCleanShutdown()

    @staticmethod
    def current_address_label(current_add_label=None, current_addr=None):
        """Getting current address labels"""
@ -1,48 +0,0 @@
|
|||
"""
|
||||
Announce addresses as they are received from other hosts
|
||||
"""
|
||||
import queue as Queue
|
||||
|
||||
import state
|
||||
from helper_random import randomshuffle
|
||||
from network.assemble import assemble_addr
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from queues import addrQueue
|
||||
from network.threads import StoppableThread
|
||||
|
||||
|
||||
class AddrThread(StoppableThread):
|
||||
"""(Node) address broadcasting thread"""
|
||||
name = "AddrBroadcaster"
|
||||
|
||||
def run(self):
|
||||
while not state.shutdown:
|
||||
chunk = []
|
||||
while True:
|
||||
try:
|
||||
data = addrQueue.get(False)
|
||||
chunk.append(data)
|
||||
except Queue.Empty:
|
||||
break
|
||||
|
||||
if chunk:
|
||||
# Choose peers randomly
|
||||
connections = BMConnectionPool().establishedConnections()
|
||||
randomshuffle(connections)
|
||||
for i in connections:
|
||||
randomshuffle(chunk)
|
||||
filtered = []
|
||||
for stream, peer, seen, destination in chunk:
|
||||
# peer's own address or address received from peer
|
||||
if i.destination in (peer, destination):
|
||||
continue
|
||||
if stream not in i.streams:
|
||||
continue
|
||||
filtered.append((stream, peer, seen))
|
||||
if filtered:
|
||||
i.append_write_buf(assemble_addr(filtered))
|
||||
|
||||
addrQueue.iterate()
|
||||
for i in range(len(chunk)):
|
||||
addrQueue.task_done()
|
||||
self.stop.wait(1)
|
|
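
Note: the run() loop above unpacks four-tuples from addrQueue. A hedged sketch of what a producer would enqueue; the field names are inferred from the unpacking above, and the peer value is illustrative (assumes network.node.Peer and time are imported):

# (stream, peer, seen-timestamp, destination-the-addr-came-from-or-None)
addrQueue.put((1, Peer('203.0.113.5', 8444), int(time.time()), None))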
@@ -1,174 +0,0 @@
"""
Improved version of asyncore dispatcher
"""
# pylint: disable=attribute-defined-outside-init
import socket
import threading
import time

import network.asyncore_pollchoose as asyncore
import state
from network.threads import BusyError, nonBlocking


class ProcessingError(Exception):
    """General class for protocol parser exceptions,
    use as a base for others."""
    pass


class UnknownStateError(ProcessingError):
    """Parser points to an unknown (unimplemented) state."""
    pass


class AdvancedDispatcher(asyncore.dispatcher):
    """Improved version of asyncore dispatcher,
    with buffers and protocol state."""
    # pylint: disable=too-many-instance-attributes
    _buf_len = 131072  # 128kB

    def __init__(self, sock=None):
        # Under python 2 the equivalent condition was:
        # if not hasattr(self, '_map'):
        # under python 3 we use:
        if '_map' not in dir(self):
            asyncore.dispatcher.__init__(self, sock)
        self.read_buf = bytearray()
        self.write_buf = bytearray()
        self.state = "init"
        self.lastTx = time.time()
        self.sentBytes = 0
        self.receivedBytes = 0
        self.expectBytes = 0
        self.readLock = threading.RLock()
        self.writeLock = threading.RLock()
        self.processingLock = threading.RLock()

    def append_write_buf(self, data):
        """Append binary data to the end of stream write buffer."""
        if data:
            if isinstance(data, list):
                with self.writeLock:
                    for chunk in data:
                        self.write_buf.extend(chunk)
            else:
                with self.writeLock:
                    self.write_buf.extend(data)

    def slice_write_buf(self, length=0):
        """Cut the beginning of the stream write buffer."""
        if length > 0:
            with self.writeLock:
                if length >= len(self.write_buf):
                    del self.write_buf[:]
                else:
                    del self.write_buf[0:length]

    def slice_read_buf(self, length=0):
        """Cut the beginning of the stream read buffer."""
        if length > 0:
            with self.readLock:
                if length >= len(self.read_buf):
                    del self.read_buf[:]
                else:
                    del self.read_buf[0:length]

    def process(self):
        """Process (parse) data that's in the buffer,
        as long as there is enough data and the connection is open."""
        while self.connected and not state.shutdown:
            try:
                with nonBlocking(self.processingLock):
                    if not self.connected or state.shutdown:
                        break
                    if len(self.read_buf) < self.expectBytes:
                        return False
                    try:
                        cmd = getattr(self, "state_" + str(self.state))
                    except AttributeError:
                        self.logger.error(
                            'Unknown state %s', self.state, exc_info=True)
                        raise UnknownStateError(self.state)
                    if not cmd():
                        break
            except BusyError:
                return False
        return False

    def set_state(self, state_str, length=0, expectBytes=0):
        """Set the next processing state."""
        self.expectBytes = expectBytes
        self.slice_read_buf(length)
        self.state = state_str

    def writable(self):
        """Is data from the write buffer ready to be sent to the network?"""
        self.uploadChunk = AdvancedDispatcher._buf_len
        if asyncore.maxUploadRate > 0:
            self.uploadChunk = int(asyncore.uploadBucket)
        self.uploadChunk = min(self.uploadChunk, len(self.write_buf))
        return asyncore.dispatcher.writable(self) and (
            self.connecting or (
                self.connected and self.uploadChunk > 0))

    def readable(self):
        """Is the read buffer ready to accept data from the network?"""
        self.downloadChunk = AdvancedDispatcher._buf_len
        if asyncore.maxDownloadRate > 0:
            self.downloadChunk = int(asyncore.downloadBucket)
        try:
            if self.expectBytes > 0 and not self.fullyEstablished:
                self.downloadChunk = min(
                    self.downloadChunk, self.expectBytes - len(self.read_buf))
                if self.downloadChunk < 0:
                    self.downloadChunk = 0
        except AttributeError:
            pass
        return asyncore.dispatcher.readable(self) and (
            self.connecting or self.accepting or (
                self.connected and self.downloadChunk > 0))

    def handle_read(self):
        """Append incoming data to the read buffer."""
        self.lastTx = time.time()
        newData = self.recv(self.downloadChunk)
        self.receivedBytes += len(newData)
        asyncore.update_received(len(newData))
        with self.readLock:
            self.read_buf.extend(newData)

    def handle_write(self):
        """Send outgoing data from write buffer."""
        self.lastTx = time.time()
        written = self.send(self.write_buf[0:self.uploadChunk])
        asyncore.update_sent(written)
        self.sentBytes += written
        self.slice_write_buf(written)

    def handle_connect_event(self):
        """Callback for connection established event."""
        try:
            asyncore.dispatcher.handle_connect_event(self)
        except socket.error as e:
            # pylint: disable=protected-access
            if e.args[0] not in asyncore._DISCONNECTED:
                raise

    def handle_connect(self):
        """Method for handling connection established implementations."""
        self.lastTx = time.time()

    def state_close(self):  # pylint: disable=no-self-use
        """Signal to the processing loop to end."""
        return False

    def handle_close(self):
        """Callback for connection being closed,
        but can also be called directly when you want connection to close."""
        with self.readLock:
            self.read_buf = bytearray()
        with self.writeLock:
            self.write_buf = bytearray()
        self.set_state("close")
        self.close()
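
Note: a minimal sketch of how a protocol is built on this class; the subclass and its state names below are illustrative, not from this codebase. process() dispatches to state_<name> methods, which return True to keep parsing or False to wait for more data:

class EchoDispatcher(AdvancedDispatcher):
    """Illustrative only: read a 4-byte big-endian length, then echo."""

    def state_init(self):
        if len(self.read_buf) < 4:
            return False  # header not complete yet
        length = int.from_bytes(bytes(self.read_buf[:4]), 'big')
        # consume the 4 header bytes, then wait for the payload
        self.set_state("payload", length=4, expectBytes=length)
        return True

    def state_payload(self):
        self.append_write_buf(bytes(self.read_buf[:self.expectBytes]))
        self.set_state("init", length=self.expectBytes, expectBytes=4)
        return True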
@@ -1,46 +0,0 @@
"""
Announce myself (node address)
"""
import time

import state
from bmconfigparser import BMConfigParser
from network.assemble import assemble_addr
from network.connectionpool import BMConnectionPool
from network.udp import UDPSocket
from network.node import Peer
from network.threads import StoppableThread


class AnnounceThread(StoppableThread):
    """A thread to manage regular announcing of this node"""
    name = "Announcer"

    def run(self):
        lastSelfAnnounced = 0
        while not self._stopped and state.shutdown == 0:
            processed = 0
            if lastSelfAnnounced < time.time() - UDPSocket.announceInterval:
                self.announceSelf()
                lastSelfAnnounced = time.time()
            if processed == 0:
                self.stop.wait(10)

    @staticmethod
    def announceSelf():
        """Announce our presence"""
        for connection in list(BMConnectionPool().udpSockets.values()):
            if not connection.announcing:
                continue
            for stream in state.streamsInWhichIAmParticipating:
                addr = (
                    stream,
                    # state.Peer('127.0.0.1', int(BMConfigParser().safeGet(
                    #     "bitmessagesettings", "port"))),
                    # int(time.time()))
                    # connection.append_write_buf(BMProto.assembleAddr([addr]))
                    Peer(
                        '127.0.0.1',
                        BMConfigParser().safeGetInt(
                            'bitmessagesettings', 'port')),
                    time.time())
                connection.append_write_buf(assemble_addr([addr]))
@@ -1,32 +0,0 @@
"""
Create bitmessage protocol command packets
"""
import struct

import addresses
from network.constants import MAX_ADDR_COUNT
from network.node import Peer
from protocol import CreatePacket, encodeHost


def assemble_addr(peerList):
    """Create address command"""
    if isinstance(peerList, Peer):
        peerList = [peerList]
    if not peerList:
        return bytes()
    retval = bytes()
    for i in range(0, len(peerList), MAX_ADDR_COUNT):
        payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT]))
        for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:
            payload += struct.pack(
                '>Q', int(timestamp))  # 64-bit time
            payload += struct.pack('>I', stream)
            # service bit flags offered by this node
            payload += struct.pack('>q', 1)
            payload += encodeHost(peer.host)
            # remote port
            payload += struct.pack('>H', peer.port)
        retval += CreatePacket('addr', payload)
    return retval
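
Note: a hedged usage sketch of assemble_addr(); the peer address is illustrative and the time import is assumed:

# One (stream, peer, timestamp) triple per advertised address.
packet = assemble_addr([(1, Peer('203.0.113.5', 8444), int(time.time()))])
# 'packet' is a complete 'addr' command, ready for append_write_buf().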
@@ -1,1012 +0,0 @@
"""
Basic infrastructure for asynchronous socket service clients and servers.
"""
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# pylint: disable=too-many-branches,too-many-lines,global-statement
# pylint: disable=redefined-builtin,no-self-use
import os
import select
import socket
import sys
import time
import warnings
from errno import (
    EADDRINUSE, EAGAIN, EALREADY, EBADF, ECONNABORTED, ECONNREFUSED,
    ECONNRESET, EHOSTUNREACH, EINPROGRESS, EINTR, EINVAL, EISCONN, ENETUNREACH,
    ENOTCONN, ENOTSOCK, EPIPE, ESHUTDOWN, ETIMEDOUT, EWOULDBLOCK, errorcode
)
from threading import current_thread

from pybitmessage import helper_random

try:
    from errno import WSAEWOULDBLOCK
except (ImportError, AttributeError):
    WSAEWOULDBLOCK = EWOULDBLOCK
try:
    from errno import WSAENOTSOCK
except (ImportError, AttributeError):
    WSAENOTSOCK = ENOTSOCK
try:
    from errno import WSAECONNRESET
except (ImportError, AttributeError):
    WSAECONNRESET = ECONNRESET
try:
    # Desirable side-effects on Windows; imports winsock error numbers
    from errno import WSAEADDRINUSE  # pylint: disable=unused-import
except (ImportError, AttributeError):
    WSAEADDRINUSE = EADDRINUSE


_DISCONNECTED = frozenset((
    ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, EBADF, ECONNREFUSED,
    EHOSTUNREACH, ENETUNREACH, ETIMEDOUT, WSAECONNRESET))

OP_READ = 1
OP_WRITE = 2

try:
    socket_map
except NameError:
    socket_map = {}


def _strerror(err):
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        if err in errorcode:
            return errorcode[err]
        return "Unknown error %s" % err
        # return "Unknown error {}".format(err)


class ExitNow(Exception):
    """We don't use this directly, but it may be necessary as we replace
    asyncore due to some library raising or expecting it"""
    pass


_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)

maxDownloadRate = 0
downloadTimestamp = 0
downloadBucket = 0
receivedBytes = 0
maxUploadRate = 0
uploadTimestamp = 0
uploadBucket = 0
sentBytes = 0


def read(obj):
    """Event to read from the object, i.e. its network socket."""

    if not can_receive():
        return
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()


def write(obj):
    """Event to write to the object, i.e. its network socket."""

    if not can_send():
        return
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()


def set_rates(download, upload):
    """Set throttling rates"""

    global maxDownloadRate, maxUploadRate, downloadBucket
    global uploadBucket, downloadTimestamp, uploadTimestamp

    maxDownloadRate = float(download) * 1024
    maxUploadRate = float(upload) * 1024
    downloadBucket = maxDownloadRate
    uploadBucket = maxUploadRate
    downloadTimestamp = time.time()
    uploadTimestamp = time.time()


def can_receive():
    """Predicate indicating whether the download throttle is in effect"""

    return maxDownloadRate == 0 or downloadBucket > 0


def can_send():
    """Predicate indicating whether the upload throttle is in effect"""

    return maxUploadRate == 0 or uploadBucket > 0


def update_received(download=0):
    """Update the receiving throttle"""

    global receivedBytes, downloadBucket, downloadTimestamp

    currentTimestamp = time.time()
    receivedBytes += download
    if maxDownloadRate > 0:
        bucketIncrease = \
            maxDownloadRate * (currentTimestamp - downloadTimestamp)
        downloadBucket += bucketIncrease
        if downloadBucket > maxDownloadRate:
            downloadBucket = int(maxDownloadRate)
        downloadBucket -= download
    downloadTimestamp = currentTimestamp


def update_sent(upload=0):
    """Update the sending throttle"""

    global sentBytes, uploadBucket, uploadTimestamp

    currentTimestamp = time.time()
    sentBytes += upload
    if maxUploadRate > 0:
        bucketIncrease = maxUploadRate * (currentTimestamp - uploadTimestamp)
        uploadBucket += bucketIncrease
        if uploadBucket > maxUploadRate:
            uploadBucket = int(maxUploadRate)
        uploadBucket -= upload
    uploadTimestamp = currentTimestamp


def _exception(obj):
    """Handle exceptions as appropriate"""

    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()


def readwrite(obj, flags):
    """Read and write any pending data to/from the object"""

    try:
        if flags & select.POLLIN and can_receive():
            obj.handle_read_event()
        if flags & select.POLLOUT and can_send():
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error as e:
        if e.args[0] not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except BaseException:
        obj.handle_error()


def select_poller(timeout=0.0, map=None):
    """A poller which uses select(), available on most platforms."""

    if map is None:
        map = socket_map
    if map:
        rd = []
        wt = []
        ex = []
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                rd.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                wt.append(fd)
            if is_r or is_w:
                ex.append(fd)
        if [] == rd == wt == ex:
            time.sleep(timeout)
            return
        try:
            rd, wt, ex = select.select(rd, wt, ex, timeout)
        except KeyboardInterrupt:
            return
        except socket.error as err:
            if err.args[0] in (EBADF, EINTR):
                return
        except Exception as err:
            if err.args[0] in (WSAENOTSOCK, ):
                return

        for fd in helper_random.randomsample(rd, len(rd)):
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in helper_random.randomsample(wt, len(wt)):
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in ex:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)
    else:
        current_thread().stop.wait(timeout)


def poll_poller(timeout=0.0, map=None):
    """A poller which uses poll(), available on most UNIXen."""

    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout * 1000)
    try:
        poll_poller.pollster
    except AttributeError:
        poll_poller.pollster = select.poll()
    if map:
        for fd, obj in list(map.items()):
            flags = newflags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
                newflags |= OP_READ
            else:
                newflags &= ~ OP_READ
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
                newflags |= OP_WRITE
            else:
                newflags &= ~ OP_WRITE
            if newflags != obj.poller_flags:
                obj.poller_flags = newflags
                try:
                    if obj.poller_registered:
                        poll_poller.pollster.modify(fd, flags)
                    else:
                        poll_poller.pollster.register(fd, flags)
                        obj.poller_registered = True
                except IOError:
                    pass
        try:
            r = poll_poller.pollster.poll(timeout)
        except KeyboardInterrupt:
            r = []
        except socket.error as err:
            if err.args[0] in (EBADF, WSAENOTSOCK, EINTR):
                return
        for fd, flags in helper_random.randomsample(r, len(r)):
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)
    else:
        current_thread().stop.wait(timeout)


# Aliases for backward compatibility
poll = select_poller
poll2 = poll3 = poll_poller


def epoll_poller(timeout=0.0, map=None):
    """A poller which uses epoll(), supported on Linux 2.5.44 and newer."""

    if map is None:
        map = socket_map
    try:
        epoll_poller.pollster
    except AttributeError:
        epoll_poller.pollster = select.epoll()
    if map:
        for fd, obj in map.items():
            flags = newflags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
                newflags |= OP_READ
            else:
                newflags &= ~ OP_READ
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
                newflags |= OP_WRITE
            else:
                newflags &= ~ OP_WRITE
            if newflags != obj.poller_flags:
                obj.poller_flags = newflags
                # Only check for exceptions if object was either readable
                # or writable.
                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
                try:
                    if obj.poller_registered:
                        epoll_poller.pollster.modify(fd, flags)
                    else:
                        epoll_poller.pollster.register(fd, flags)
                        obj.poller_registered = True
                except IOError:
                    pass
        try:
            r = epoll_poller.pollster.poll(timeout)
        except IOError as e:
            if e.errno != EINTR:
                raise
            r = []
        except select.error as err:
            if err.args[0] != EINTR:
                raise
            r = []
        for fd, flags in helper_random.randomsample(r, len(r)):
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)
    else:
        current_thread().stop.wait(timeout)


def kqueue_poller(timeout=0.0, map=None):
    """A poller which uses kqueue(), BSD specific."""
    # pylint: disable=no-member,too-many-statements

    if map is None:
        map = socket_map
    try:
        kqueue_poller.pollster
    except AttributeError:
        kqueue_poller.pollster = select.kqueue()
    if map:
        updates = []
        selectables = 0
        for fd, obj in map.items():
            kq_filter = 0
            if obj.readable():
                kq_filter |= 1
                selectables += 1
            if obj.writable() and not obj.accepting:
                kq_filter |= 2
                selectables += 1
            if kq_filter != obj.poller_filter:
                # unlike other pollers, READ and WRITE aren't OR able but have
                # to be set and checked separately
                if kq_filter & 1 != obj.poller_filter & 1:
                    poller_flags = select.KQ_EV_ADD
                    if kq_filter & 1:
                        poller_flags |= select.KQ_EV_ENABLE
                    else:
                        poller_flags |= select.KQ_EV_DISABLE
                    updates.append(
                        select.kevent(
                            fd, filter=select.KQ_FILTER_READ,
                            flags=poller_flags))
                if kq_filter & 2 != obj.poller_filter & 2:
                    poller_flags = select.KQ_EV_ADD
                    if kq_filter & 2:
                        poller_flags |= select.KQ_EV_ENABLE
                    else:
                        poller_flags |= select.KQ_EV_DISABLE
                    updates.append(
                        select.kevent(
                            fd, filter=select.KQ_FILTER_WRITE,
                            flags=poller_flags))
                obj.poller_filter = kq_filter

        if not selectables:
            # unlike other pollers, kqueue poll does not wait if there are no
            # filters setup
            current_thread().stop.wait(timeout)
            return

        events = kqueue_poller.pollster.control(updates, selectables, timeout)
        if len(events) > 1:
            events = helper_random.randomsample(events, len(events))

        for event in events:
            fd = event.ident
            obj = map.get(fd)
            if obj is None:
                continue
            if event.flags & select.KQ_EV_ERROR:
                _exception(obj)
                continue
            if event.flags & select.KQ_EV_EOF and event.data and event.fflags:
                obj.handle_close()
                continue
            if event.filter == select.KQ_FILTER_READ:
                read(obj)
            if event.filter == select.KQ_FILTER_WRITE:
                write(obj)
    else:
        current_thread().stop.wait(timeout)


def loop(timeout=30.0, _=False, map=None, count=None, poller=None):
    """Poll in a loop, until count or timeout is reached"""

    if map is None:
        map = socket_map
    if count is None:
        count = True
    # code which grants backward compatibility with "use_poll"
    # argument which should no longer be used in favor of
    # "poller"

    # if poller is None:
    #     if use_poll:
    #         poller = poll_poller
    #     elif hasattr(select, 'epoll'):
    #         poller = epoll_poller
    #     elif hasattr(select, 'kqueue'):
    #         poller = kqueue_poller
    #     elif hasattr(select, 'poll'):
    #         poller = poll_poller
    #     elif hasattr(select, 'select'):
    #         poller = select_poller
    poller = select_poller
    if timeout == 0:
        deadline = 0
    else:
        deadline = time.time() + timeout
    while count:
        # fill buckets first
        update_sent()
        update_received()
        subtimeout = deadline - time.time()
        if subtimeout <= 0:
            break
        # then poll
        poller(subtimeout, map)
        if isinstance(count, int):
            count = count - 1


class dispatcher(object):
    """Dispatcher for socket objects"""
    # pylint: disable=too-many-public-methods,too-many-instance-attributes

    debug = False
    connected = False
    accepting = False
    connecting = False
    closing = False
    addr = None
    ignore_log_types = frozenset(['warning'])
    poller_registered = False
    poller_flags = 0
    # don't do network IO with a smaller bucket than this
    minTx = 1500

    def __init__(self, sock=None, map=None):
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error as err:
                if err.args[0] in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__ + "." + self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    __str__ = __repr__

    def add_channel(self, map=None):
        """Add a channel"""
        # pylint: disable=attribute-defined-outside-init
        if map is None:
            map = self._map
        map[self._fileno] = self
        self.poller_flags = 0
        self.poller_filter = 0

    def del_channel(self, map=None):
        """Delete a channel"""
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            del map[fd]
        if self._fileno:
            try:
                kqueue_poller.pollster.control([select.kevent(
                    fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0)
            except (AttributeError, KeyError, TypeError, IOError, OSError):
                pass
            try:
                kqueue_poller.pollster.control([select.kevent(
                    fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0)
            except (AttributeError, KeyError, TypeError, IOError, OSError):
                pass
            try:
                epoll_poller.pollster.unregister(fd)
            except (AttributeError, KeyError, TypeError, IOError):
                # no epoll used, or not registered
                pass
            try:
                poll_poller.pollster.unregister(fd)
            except (AttributeError, KeyError, TypeError, IOError):
                # no poll used, or not registered
                pass
        self._fileno = None
        self.poller_flags = 0
        self.poller_filter = 0
        self.poller_registered = False

    def create_socket(
            self, family=socket.AF_INET, socket_type=socket.SOCK_STREAM):
        """Create a socket"""
        # pylint: disable=attribute-defined-outside-init
        self.family_and_type = family, socket_type
        sock = socket.socket(family, socket_type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Set socket"""
        self.socket = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        """try to re-use a server port if possible"""
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt(
                    socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1
            )
        except socket.error:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        """Predicate to indicate download throttle status"""
        if maxDownloadRate > 0:
            return downloadBucket > dispatcher.minTx
        return True

    def writable(self):
        """Predicate to indicate upload throttle status"""
        if maxUploadRate > 0:
            return uploadBucket > dispatcher.minTx
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        """Listen on a port"""
        self.accepting = True
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        """Bind to an address"""
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        """Connect to an address"""
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK, WSAEWOULDBLOCK) \
                or err == EINVAL and os.name in ('nt', 'ce'):
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        """Accept incoming connections.
        Returns either a (conn, addr) pair or None."""
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except socket.error as why:
            if why.args[0] in (
                    EWOULDBLOCK, WSAEWOULDBLOCK, ECONNABORTED,
                    EAGAIN, ENOTCONN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        """Send data"""
        try:
            result = self.socket.send(data)
            return result
        except socket.error as why:
            if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK):
                return 0
            elif why.args[0] in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Receive data"""
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            return data
        except socket.error as why:
            # winsock sometimes raises ENOTCONN
            if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK):
                return b''
            if why.args[0] in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        """Close connection"""
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        try:
            self.socket.close()
        except socket.error as why:
            if why.args[0] not in (ENOTCONN, EBADF):
                raise

    # cheap inheritance, used to pass all other attribute
    # references to the underlying socket object.
    def __getattr__(self, attr):
        try:
            retattr = getattr(self.socket, attr)
        except AttributeError:
            raise AttributeError("{} instance has no attribute {}"
                                 .format(self.__class__.__name__, attr))
        else:
            msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s"\
                  " instead" % {'me': self.__class__.__name__, 'attr': attr}
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return retattr

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        """Log a message to stderr"""
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, log_type='info'):
        """Conditionally print a message"""
        if log_type not in self.ignore_log_types:
            print('{}: {}'.format(log_type, message))

    def handle_read_event(self):
        """Handle a read event"""
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        """Handle a connection event"""
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        """Handle a write event"""
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        """Handle expected exceptions"""
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        elif sys.platform.startswith("win"):
            # async connect failed
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        """Handle unexpected exceptions"""
        _, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except BaseException:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr, t, v, tbinfo),
            'error')
        self.handle_close()

    def handle_accept(self):
        """Handle an accept event"""
        pair = self.accept()
        if pair is not None:
            self.handle_accepted(*pair)

    def handle_expt(self):
        """Log that the subclass does not implement handle_expt"""
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        """Log that the subclass does not implement handle_read"""
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        """Log that the subclass does not implement handle_write"""
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        """Log that the subclass does not implement handle_connect"""
        self.log_info('unhandled connect event', 'warning')

    def handle_accepted(self, sock, addr):
        """Log that the subclass does not implement handle_accepted"""
        sock.close()
        self.log_info('unhandled accepted event on %s' % (addr), 'warning')

    def handle_close(self):
        """Log that the subclass does not implement handle_close"""
        self.log_info('unhandled close event', 'warning')
        self.close()


class dispatcher_with_send(dispatcher):
    """
    adds simple buffered output capability, useful for simple clients.
    [for more sophisticated usage use asynchat.async_chat]
    """

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''

    def initiate_send(self):
        """Initiate a send"""
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:512])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        """Handle a write event"""
        self.initiate_send()

    def writable(self):
        """Predicate to indicate if the object is writable"""
        return not self.connected or len(self.out_buffer)

    def send(self, data):
        """Send data"""
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()


# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------


def compact_traceback():
    """Return a compact traceback"""
    t, v, tb = sys.exc_info()
    tbinfo = []
    # Must have a traceback
    if not tb:
        raise AssertionError("traceback does not exist")
    while tb:
        tbinfo.append((
            tb.tb_frame.f_code.co_filename,
            tb.tb_frame.f_code.co_name,
            str(tb.tb_lineno)
        ))
        tb = tb.tb_next

    # just to be safe
    del tb

    filename, function, line = tbinfo[-1]
    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
    return (filename, function, line), t, v, info


def close_all(map=None, ignore_all=False):
    """Close all connections"""

    if map is None:
        map = socket_map
    for x in list(map.values()):
        try:
            x.close()
        except OSError as e:
            if e.args[0] == EBADF:
                pass
            elif not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except BaseException:
            if not ignore_all:
                raise
    map.clear()


# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...


if os.name == 'posix':
    import fcntl

    class file_wrapper:  # pylint: disable=old-style-class
        """
        Here we override just enough to make a file look
        like a socket for the purposes of asyncore.

        The passed fd is automatically os.dup()'d
        """

        def __init__(self, fd):
            self.fd = os.dup(fd)

        def recv(self, *args):
            """Fake recv()"""
            return os.read(self.fd, *args)

        def send(self, *args):
            """Fake send()"""
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            """Fake getsockopt()"""
            if (level == socket.SOL_SOCKET and optname == socket.SO_ERROR and
                    not buflen):
                return 0
            raise NotImplementedError(
                "Only asyncore specific behaviour implemented.")

        read = recv
        write = send

        def close(self):
            """Fake close()"""
            os.close(self.fd)

        def fileno(self):
            """Fake fileno()"""
            return self.fd

    class file_dispatcher(dispatcher):
        """A dispatcher for file_wrapper objects"""

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
            flags = flags | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def set_file(self, fd):
            """Set file"""
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
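
Note: how the module-level pieces above fit together, as a hedged sketch (the rate values are illustrative). set_rates() takes kB/s and fills the token buckets; loop() repeatedly refills them via update_sent()/update_received() and runs one poller pass over socket_map:

set_rates(200, 50)   # throttle to roughly 200 kB/s down, 50 kB/s up
# dispatcher subclasses register themselves in socket_map on creation
loop(timeout=30.0)   # poll registered sockets until the deadline passes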
@@ -1,165 +0,0 @@
"""
BMObject and its exceptions.
"""
import logging
import time

import protocol
import state
from addresses import calculateInventoryHash
from inventory import Inventory
from network.dandelion import Dandelion

logger = logging.getLogger('default')


class BMObjectInsufficientPOWError(Exception):
    """Exception indicating the object
    doesn't have sufficient proof of work."""
    errorCodes = ("Insufficient proof of work")


class BMObjectInvalidDataError(Exception):
    """Exception indicating the data being parsed
    does not match the specification."""
    errorCodes = ("Data invalid")


class BMObjectExpiredError(Exception):
    """Exception indicating the object's lifetime has expired."""
    errorCodes = ("Object expired")


class BMObjectUnwantedStreamError(Exception):
    """Exception indicating the object is in a stream
    we didn't advertise as being interested in."""
    errorCodes = ("Object in unwanted stream")


class BMObjectInvalidError(Exception):
    """The object's data does not match object specification."""
    errorCodes = ("Invalid object")


class BMObjectAlreadyHaveError(Exception):
    """We received a duplicate object (one we already have)"""
    errorCodes = ("Already have this object")


class BMObject(object):  # pylint: disable=too-many-instance-attributes
    """Bitmessage Object as a class."""

    # max TTL, 28 days and 3 hours
    maxTTL = 28 * 24 * 60 * 60 + 10800
    # min TTL, 1 hour in the past
    minTTL = -3600

    def __init__(
            self,
            nonce,
            expiresTime,
            objectType,
            version,
            streamNumber,
            data,
            payloadOffset
    ):  # pylint: disable=too-many-arguments
        self.nonce = nonce
        self.expiresTime = expiresTime
        self.objectType = objectType
        self.version = version
        self.streamNumber = streamNumber
        self.inventoryHash = calculateInventoryHash(data)
        # copy to avoid memory issues
        self.data = bytearray(data)
        self.tag = self.data[payloadOffset:payloadOffset + 32]

    def checkProofOfWorkSufficient(self):
        """Perform a proof of work check for sufficiency."""
        # Let us check to make sure that the proof of work is sufficient.
        if not protocol.isProofOfWorkSufficient(self.data):
            logger.info('Proof of work is insufficient.')
            raise BMObjectInsufficientPOWError()

    def checkEOLSanity(self):
        """Check if object's lifetime
        isn't ridiculously far in the past or future."""
        # EOL sanity check
        if self.expiresTime - int(time.time()) > BMObject.maxTTL:
            logger.info(
                'This object\'s End of Life time is too far in the future.'
                ' Ignoring it. Time is %i', self.expiresTime)
            # .. todo:: remove from download queue
            raise BMObjectExpiredError()

        if self.expiresTime - int(time.time()) < BMObject.minTTL:
            logger.info(
                'This object\'s End of Life time was too long ago.'
                ' Ignoring the object. Time is %i', self.expiresTime)
            # .. todo:: remove from download queue
            raise BMObjectExpiredError()

    def checkStream(self):
        """Check if object's stream matches streams we are interested in"""
        if self.streamNumber not in state.streamsInWhichIAmParticipating:
            logger.debug(
                'The streamNumber %i isn\'t one we are interested in.',
                self.streamNumber)
            raise BMObjectUnwantedStreamError()

    def checkAlreadyHave(self):
        """
        Check if we already have the object
        (so that we don't duplicate it in inventory
        or advertise it unnecessarily)
        """
        # if it's a stem duplicate, pretend we don't have it
        # pylint: disable=protected-access
        if Dandelion().hasHash(self.inventoryHash):
            return
        if self.inventoryHash in Inventory():
            raise BMObjectAlreadyHaveError()

    def checkObjectByType(self):
        """Call an object type specific check
        (objects can have additional checks based on their types)"""
        if self.objectType == protocol.OBJECT_GETPUBKEY:
            self.checkGetpubkey()
        elif self.objectType == protocol.OBJECT_PUBKEY:
            self.checkPubkey()
        elif self.objectType == protocol.OBJECT_MSG:
            self.checkMessage()
        elif self.objectType == protocol.OBJECT_BROADCAST:
            self.checkBroadcast()
        # other objects don't require other types of tests

    def checkMessage(self):  # pylint: disable=no-self-use
        """"Message" object type checks."""
        return

    def checkGetpubkey(self):
        """"Getpubkey" object type checks."""
        if len(self.data) < 42:
            logger.info(
                'getpubkey message doesn\'t contain enough data. Ignoring.')
            raise BMObjectInvalidError()

    def checkPubkey(self):
        """"Pubkey" object type checks."""
        # sanity check
        if len(self.data) < 146 or len(self.data) > 440:
            logger.info('pubkey object too short or too long. Ignoring.')
            raise BMObjectInvalidError()

    def checkBroadcast(self):
        """"Broadcast" object type checks."""
        if len(self.data) < 180:
            logger.debug(
                'The payload length of this broadcast'
                ' packet is unreasonably low. Someone is probably'
                ' trying funny business. Ignoring message.')
            raise BMObjectInvalidError()

        # this isn't supported anymore
        if self.version < 2:
            raise BMObjectInvalidError()
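
Note: the check methods above are designed to be called in sequence by the protocol parser when an object arrives. A hedged sketch of that pipeline; the constructor arguments are placeholders, and the real caller lives in the protocol parser below:

# Each check raises its own BMObject*Error on failure.
obj = BMObject(nonce, expiresTime, objectType, version,
               streamNumber, data, payloadOffset)
obj.checkProofOfWorkSufficient()  # BMObjectInsufficientPOWError
obj.checkEOLSanity()              # BMObjectExpiredError
obj.checkAlreadyHave()            # BMObjectAlreadyHaveError
obj.checkStream()                 # BMObjectUnwantedStreamError
obj.checkObjectByType()           # type-specific, BMObjectInvalidError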
@ -1,702 +0,0 @@
|
|||
"""
|
||||
Bitmessage Protocol
|
||||
"""
|
||||
# pylint: disable=attribute-defined-outside-init,too-few-public-methods,logging-format-interpolation,protected-access
|
||||
import base64
|
||||
import hashlib
|
||||
import logging
|
||||
import socket
|
||||
import struct
|
||||
import time
|
||||
from binascii import hexlify
|
||||
|
||||
import addresses
|
||||
from network import connectionpool
|
||||
import knownnodes
|
||||
import protocol
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from inventory import Inventory
|
||||
from network.advanceddispatcher import AdvancedDispatcher
|
||||
from network.bmobject import (
|
||||
BMObject, BMObjectAlreadyHaveError, BMObjectExpiredError,
|
||||
BMObjectInsufficientPOWError, BMObjectInvalidDataError,
|
||||
BMObjectInvalidError, BMObjectUnwantedStreamError
|
||||
)
|
||||
from network.constants import (
|
||||
ADDRESS_ALIVE, MAX_MESSAGE_SIZE, MAX_OBJECT_COUNT,
|
||||
MAX_OBJECT_PAYLOAD_SIZE, MAX_TIME_OFFSET
|
||||
)
|
||||
from network.dandelion import Dandelion
|
||||
from network.proxy import ProxyError
|
||||
from network.objectracker import missingObjects, ObjectTracker
|
||||
from network.node import Node, Peer
|
||||
from queues import objectProcessorQueue, portCheckerQueue, invQueue
|
||||
from network.randomtrackingdict import RandomTrackingDict
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class BMProtoError(ProxyError):
|
||||
"""A Bitmessage Protocol Base Error"""
|
||||
errorCodes = ("Protocol error")
|
||||
|
||||
|
||||
class BMProtoInsufficientDataError(BMProtoError):
|
||||
"""A Bitmessage Protocol Insufficient Data Error"""
|
||||
errorCodes = ("Insufficient data")
|
||||
|
||||
|
||||
class BMProtoExcessiveDataError(BMProtoError):
|
||||
"""A Bitmessage Protocol Excessive Data Error"""
|
||||
errorCodes = ("Too much data")
|
||||
|
||||
|
||||
class BMProto(AdvancedDispatcher, ObjectTracker):
|
||||
"""A parser for the Bitmessage Protocol"""
|
||||
# pylint: disable=too-many-instance-attributes, too-many-public-methods
|
||||
timeOffsetWrongCount = 0
|
||||
|
||||
def __init__(self, address=None, sock=None):
|
||||
# pylint: disable=unused-argument, super-init-not-called
|
||||
AdvancedDispatcher.__init__(self, sock)
|
||||
self.isOutbound = False
|
||||
# packet/connection from a local IP
|
||||
self.local = False
|
||||
self.pendingUpload = RandomTrackingDict()
|
||||
# canonical identifier of network group
|
||||
self.network_group = None
|
||||
|
||||
def bm_proto_reset(self):
|
||||
"""Reset the bitmessage object parser"""
|
||||
self.magic = None
|
||||
self.command = None
|
||||
self.payloadLength = 0
|
||||
self.checksum = None
|
||||
self.payload = None
|
||||
self.invalid = False
|
||||
self.payloadOffset = 0
|
||||
self.expectBytes = protocol.Header.size
|
||||
self.object = None
|
||||
|
||||
def state_bm_header(self):
|
||||
"""Process incoming header"""
|
||||
self.magic, self.command, self.payloadLength, self.checksum = \
|
||||
protocol.Header.unpack(self.read_buf[:protocol.Header.size])
|
||||
# its shoule be in string
|
||||
self.command = self.command.rstrip('\x00'.encode('utf-8'))
|
||||
# pylint: disable=global-statement
|
||||
if self.magic != 0xE9BEB4D9:
|
||||
self.set_state("bm_header", length=1)
|
||||
self.bm_proto_reset()
|
||||
logger.debug('Bad magic')
|
||||
if self.socket.type == socket.SOCK_STREAM:
|
||||
self.close_reason = "Bad magic"
|
||||
self.set_state("close")
|
||||
return False
|
||||
if self.payloadLength > MAX_MESSAGE_SIZE:
|
||||
self.invalid = True
|
||||
self.set_state(
|
||||
"bm_command",
|
||||
length=protocol.Header.size, expectBytes=self.payloadLength)
|
||||
return True
|
||||
|
||||
def state_bm_command(self):
|
||||
# pylint: disable=too-many-branches, too-many-statements
|
||||
"""Process incoming command"""
|
||||
self.payload = self.read_buf[:self.payloadLength]
|
||||
if self.checksum != hashlib.sha512(self.payload).digest()[0:4]:
|
||||
logger.debug('Bad checksum, ignoring')
|
||||
self.invalid = True
|
||||
retval = True
|
||||
if not self.fullyEstablished and self.command not in (
|
||||
"error".encode(), "version".encode(), "verack".encode()):
|
||||
logger.error(
|
||||
'Received command {} before connection was fully'
|
||||
' established, ignoring'.format(self.command))
|
||||
self.invalid = True
|
||||
if not self.invalid:
|
||||
try:
|
||||
command = self.command.decode() if self.command else self.command
|
||||
|
||||
retval = getattr(
|
||||
self, "bm_command_" + command)()
|
||||
except AttributeError:
|
||||
# unimplemented command
|
||||
logger.debug('unimplemented command %s', self.command)
|
||||
except BMProtoInsufficientDataError:
|
||||
logger.debug('packet length too short, skipping')
|
||||
except BMProtoExcessiveDataError:
|
||||
logger.debug('too much data, skipping')
|
||||
except BMObjectInsufficientPOWError:
|
||||
logger.debug('insufficient PoW, skipping')
|
||||
except BMObjectInvalidDataError:
|
||||
logger.debug('object invalid data, skipping')
|
||||
except BMObjectExpiredError:
|
||||
logger.debug('object expired, skipping')
|
||||
except BMObjectUnwantedStreamError:
|
||||
logger.debug('object not in wanted stream, skipping')
|
||||
except BMObjectInvalidError:
|
||||
logger.debug('object invalid, skipping')
|
||||
except BMObjectAlreadyHaveError:
|
||||
logger.debug(
|
||||
'%(host)s:%(port)i already got object, skipping',
|
||||
self.destination._asdict())
|
||||
except struct.error:
|
||||
logger.debug('decoding error, skipping')
|
||||
except ValueError:
|
||||
pass
|
||||
elif self.socket.type == socket.SOCK_DGRAM:
|
||||
# broken read, ignore
|
||||
pass
|
||||
else:
|
||||
logger.debug('Closing due to invalid command {}'.format(self.command))
|
||||
self.close_reason = ("Invalid command {}".format(self.command))
|
||||
self.set_state("close")
|
||||
return False
|
||||
if retval:
|
||||
self.set_state("bm_header", length=self.payloadLength)
|
||||
self.bm_proto_reset()
|
||||
# else assume the command requires a different state to follow
|
||||
return True

    def decode_payload_string(self, length):
        """Read and return `length` bytes from payload"""
        value = self.payload[self.payloadOffset:self.payloadOffset + length]
        self.payloadOffset += length
        return value

    def decode_payload_varint(self):
        """Decode a varint from the payload"""
        value, offset = addresses.decodeVarint(
            self.payload[self.payloadOffset:])
        self.payloadOffset += offset
        return value

    def decode_payload_node(self):
        """Decode node details from the payload"""
        # protocol.checkIPAddress()
        services, host, port = self.decode_payload_content("Q16sH")
        if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF'.encode('raw_unicode_escape'):
            host = socket.inet_ntop(socket.AF_INET, host[12:16])
        elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43'.encode('raw_unicode_escape'):
            # Onion, based on BMD/bitcoind
            host = base64.b32encode(host[6:]).lower() + ".onion"
        else:
            host = socket.inet_ntop(socket.AF_INET6, host)
        if host == "":
            # This can happen on Windows systems which are not 64-bit
            # compatible so let us drop the IPv6 address.
            host = socket.inet_ntop(socket.AF_INET, host[12:16])

        return Node(services, host, port)
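
    # The 16-byte host field packs three address families: IPv4-mapped IPv6,
    # OnionCat-encoded onion addresses (prefix fd87:d87e:eb43, as in
    # BMD/bitcoind), and plain IPv6. A minimal sketch of the onion branch,
    # assuming a 16-byte buffer built from a hypothetical 10-byte onion
    # identifier:
    #
    #     import base64
    #     prefix = b'\xfd\x87\xd8\x7e\xeb\x43'
    #     ident = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09'  # hypothetical
    #     packed = prefix + ident
    #     host = base64.b32encode(packed[6:]).lower() + b".onion"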

    # pylint: disable=too-many-branches, too-many-statements
    def decode_payload_content(self, pattern="v"):
        """
        Decode the payload depending on pattern:

        L = varint indicating the length of the next array
        l = varint indicating the length of the next item
        v = varint (or array)
        H = uint16
        I = uint32
        Q = uint64
        i = net_addr (without time and stream number)
        s = string
        0-9 = length of the next item
        , = end of array
        """

        # pylint: disable=inconsistent-return-statements
        def decode_simple(self, char="v"):
            """Decode the payload using one char pattern"""
            if char == "v":
                return self.decode_payload_varint()
            if char == "i":
                return self.decode_payload_node()
            if char == "H":
                self.payloadOffset += 2
                return struct.unpack(">H", self.payload[
                    self.payloadOffset - 2:self.payloadOffset])[0]
            if char == "I":
                self.payloadOffset += 4
                return struct.unpack(">I", self.payload[
                    self.payloadOffset - 4:self.payloadOffset])[0]
            if char == "Q":
                self.payloadOffset += 8
                return struct.unpack(">Q", self.payload[
                    self.payloadOffset - 8:self.payloadOffset])[0]

        size = None
        isArray = False

        # size
        # iterator starting from size counting to 0
        # isArray?
        # subpattern
        # position of parser in subpattern
        # retval (array)
        parserStack = [[1, 1, False, pattern, 0, []]]

        while True:
            i = parserStack[-1][3][parserStack[-1][4]]
            if i in "0123456789" and (
                    size is None or parserStack[-1][3][parserStack[-1][4] - 1]
                    not in "lL"):
                try:
                    size = size * 10 + int(i)
                except TypeError:
                    size = int(i)
                isArray = False
            elif i in "Ll" and size is None:
                size = self.decode_payload_varint()
                isArray = i == "L"
            elif size is not None:
                if isArray:
                    parserStack.append([
                        size, size, isArray,
                        parserStack[-1][3][parserStack[-1][4]:], 0, []
                    ])
                    parserStack[-2][4] = len(parserStack[-2][3])
                else:
                    for j in range(parserStack[-1][4], len(parserStack[-1][3])):
                        if parserStack[-1][3][j] not in "lL0123456789":
                            break
                    # pylint: disable=undefined-loop-variable
                    parserStack.append([
                        size, size, isArray,
                        parserStack[-1][3][parserStack[-1][4]:j + 1], 0, []
                    ])
                    parserStack[-2][4] += len(parserStack[-1][3]) - 1
                size = None
                continue
            elif i == "s":
                # if parserStack[-2][2]:
                #     parserStack[-1][5].append(self.payload[
                #         self.payloadOffset:self.payloadOffset + parserStack[-1][0]])
                # else:
                parserStack[-1][5] = self.payload[
                    self.payloadOffset:self.payloadOffset + parserStack[-1][0]]
                self.payloadOffset += parserStack[-1][0]
                parserStack[-1][1] = 0
                parserStack[-1][2] = True
                # del parserStack[-1]
                size = None
            elif i in "viHIQ":
                parserStack[-1][5].append(decode_simple(
                    self, parserStack[-1][3][parserStack[-1][4]]))
                size = None
            else:
                size = None
            for depth in range(len(parserStack) - 1, -1, -1):
                parserStack[depth][4] += 1
                if parserStack[depth][4] >= len(parserStack[depth][3]):
                    parserStack[depth][1] -= 1
                    parserStack[depth][4] = 0
                    if depth > 0:
                        if parserStack[depth][2]:
                            parserStack[depth - 1][5].append(
                                parserStack[depth][5])
                        else:
                            parserStack[depth - 1][5].extend(
                                parserStack[depth][5])
                        parserStack[depth][5] = []
                    if parserStack[depth][1] <= 0:
                        if depth == 0:
                            # we're done, at depth 0 counter is at 0
                            # and pattern is done parsing
                            return parserStack[depth][5]
                        del parserStack[-1]
                        continue
                    break
                break
            if self.payloadOffset > self.payloadLength:
                logger.debug(
                    'Insufficient data %i/%i',
                    self.payloadOffset, self.payloadLength)
                raise BMProtoInsufficientDataError()
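
    # The pattern string is a tiny decoding language: "vvlsls" reads two
    # varints, then a varint-prefixed string, then another varint-prefixed
    # string (as used by bm_command_error below), and "LQIQ16sH" reads a
    # varint-counted array of net_addr records. A rough sketch of a call,
    # assuming `conn.payload` holds a complete error message:
    #
    #     fatal, ban_time, vector, text = conn.decode_payload_content("vvlsls")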

    def bm_command_error(self):
        """Decode an error message and log it"""
        err_values = self.decode_payload_content("vvlsls")
        fatalStatus = err_values[0]
        # banTime = err_values[1]
        # inventoryVector = err_values[2]
        errorText = err_values[3]
        logger.error(
            '%s:%i error: %i, %s', self.destination.host,
            self.destination.port, fatalStatus, errorText)
        return True

    def bm_command_getdata(self):
        """
        Incoming request for object(s).
        If we have them and some other conditions are fulfilled,
        append them to the write queue.
        """
        # a varint-counted array of 32-byte strings (inventory hashes)
        items = self.decode_payload_content("l32s")
        # skip?
        now = time.time()
        if now < self.skipUntil:
            return True
        for i in items:
            self.pendingUpload[bytes(i)] = now
        return True

    def _command_inv(self, dandelion=False):
        items = self.decode_payload_content("l32s")

        if len(items) > MAX_OBJECT_COUNT:
            logger.error(
                'Too many items in %sinv message!', 'd' if dandelion else '')
            raise BMProtoExcessiveDataError()

        # ignore dinv if dandelion turned off
        if dandelion and not state.dandelion:
            return True
        for i in map(bytes, items):
            if i in Inventory() and not Dandelion().hasHash(i):
                continue
            if dandelion and not Dandelion().hasHash(i):
                Dandelion().addHash(i, self)
            self.handleReceivedInventory(i)

        return True

    def bm_command_inv(self):
        """Non-dandelion announce"""
        return self._command_inv(False)

    def bm_command_dinv(self):
        """Dandelion stem announce"""
        return self._command_inv(True)

    def bm_command_object(self):
        """Incoming object, process it"""
        objectOffset = self.payloadOffset
        nonce, expiresTime, objectType, version, streamNumber = \
            self.decode_payload_content("QQIvv")
        self.object = BMObject(
            nonce, expiresTime, objectType, version, streamNumber,
            self.payload, self.payloadOffset)

        if len(self.payload) - self.payloadOffset > MAX_OBJECT_PAYLOAD_SIZE:
            logger.info(
                'The payload length of this object is too large (%d bytes).'
                ' Ignoring it.', len(self.payload) - self.payloadOffset)
            raise BMProtoExcessiveDataError()

        try:
            self.object.checkProofOfWorkSufficient()
            self.object.checkEOLSanity()
            self.object.checkAlreadyHave()
        except (BMObjectExpiredError, BMObjectAlreadyHaveError,
                BMObjectInsufficientPOWError):
            BMProto.stopDownloadingObject(self.object.inventoryHash)
            raise
        try:
            self.object.checkStream()
        except BMObjectUnwantedStreamError:
            acceptmismatch = BMConfigParser().get(
                "inventory", "acceptmismatch")
            BMProto.stopDownloadingObject(
                self.object.inventoryHash, acceptmismatch)
            if not acceptmismatch:
                raise

        try:
            self.object.checkObjectByType()
            objectProcessorQueue.put((
                self.object.objectType, memoryview(self.object.data)))
        except BMObjectInvalidError:
            BMProto.stopDownloadingObject(self.object.inventoryHash, True)
        else:
            try:
                del missingObjects[self.object.inventoryHash]
            except KeyError:
                pass
        if self.object.inventoryHash in Inventory() and Dandelion().hasHash(self.object.inventoryHash):
            Dandelion().removeHash(self.object.inventoryHash, "cycle detection")
        Inventory()[self.object.inventoryHash] = (
            self.object.objectType, self.object.streamNumber,
            memoryview(self.payload[objectOffset:]), self.object.expiresTime,
            memoryview(self.object.tag)
        )
        self.handleReceivedObject(
            self.object.streamNumber, self.object.inventoryHash)
        invQueue.put((
            self.object.streamNumber, self.object.inventoryHash,
            self.destination))
        return True

    def _decode_addr(self):
        return self.decode_payload_content("LQIQ16sH")

    def bm_command_addr(self):
        """Incoming addresses, process them"""
        addresses = self._decode_addr()  # pylint: disable=redefined-outer-name
        for i in addresses:
            seenTime, stream, _, ip, port = i
            decodedIP = protocol.checkIPAddress(bytes(ip))

            if stream not in state.streamsInWhichIAmParticipating:
                continue
            if (
                decodedIP
                and time.time() - seenTime > 0
                and seenTime > time.time() - ADDRESS_ALIVE
                and port > 0
            ):
                peer = Peer(decodedIP, port)
                try:
                    if knownnodes.knownNodes[stream][peer]["lastseen"] > \
                            seenTime:
                        continue
                except KeyError:
                    pass
                if len(knownnodes.knownNodes[stream]) < BMConfigParser().safeGetInt("knownnodes", "maxnodes"):
                    with knownnodes.knownNodesLock:
                        try:
                            knownnodes.knownNodes[stream][peer]["lastseen"] = \
                                seenTime
                        except (TypeError, KeyError):
                            knownnodes.knownNodes[stream][peer] = {
                                "lastseen": seenTime,
                                "rating": 0,
                                "self": False,
                            }
                # since we don't track peers outside of knownnodes,
                # only spread if in knownnodes to prevent flood
                # DISABLED TO WORKAROUND FLOOD/LEAK
                # addrQueue.put((stream, peer, seenTime,
                #                self.destination))
        return True

    def bm_command_portcheck(self):
        """Incoming port check request, queue it."""
        portCheckerQueue.put(Peer(self.destination, self.peerNode.port))
        return True

    def bm_command_ping(self):
        """Incoming ping, respond to it."""
        self.append_write_buf(protocol.CreatePacket('pong'))
        return True

    def bm_command_pong(self):  # pylint: disable=no-self-use
        """
        Incoming pong.
        Ignore it. PyBitmessage pings connections after about 5 minutes
        of inactivity, and leaves it to the TCP stack to handle actual
        timeouts. So there is no need to do anything when a pong arrives.
        """
        # nothing really
        return True

    def bm_command_verack(self):
        """
        Incoming verack.
        If already sent my own verack, handshake is complete (except
        potentially waiting for buffers to flush), so we can continue
        to the main connection phase. If not sent verack yet,
        continue processing.
        """
        self.verackReceived = True
        if not self.verackSent:
            return True
        self.set_state(
            "tls_init" if self.isSSL else "connection_fully_established",
            length=self.payloadLength, expectBytes=0)
        return False
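
    # Handshake ordering, as the verack/version handlers imply: each side
    # sends version, the remote answers with verack, and the connection is
    # fully established only once a peer has both sent and received a verack.
    # A condensed sketch of that state test (names taken from this class):
    #
    #     if self.verackSent and self.verackReceived:
    #         self.set_state("tls_init" if self.isSSL
    #                        else "connection_fully_established")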

    def bm_command_version(self):
        """
        Incoming version.
        Parse and log, remember important things, like streams, bitfields, etc.
        """
        decoded = self.decode_payload_content("IQQiiQlslv")
        (self.remoteProtocolVersion, self.services, self.timestamp,
         self.sockNode, self.peerNode, self.nonce, self.userAgent
         ) = decoded[:7]
        self.streams = decoded[7:]
        self.nonce = struct.pack('>Q', self.nonce)
        self.timeOffset = self.timestamp - int(time.time())
        logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion)
        logger.debug('services: 0x%08X', self.services)
        logger.debug('time offset: %i', self.timeOffset)
        logger.debug('my external IP: %s', self.sockNode.host)
        logger.debug(
            'remote node incoming address: %s:%i',
            self.destination.host, self.peerNode.port)
        logger.debug('user agent: %s', self.userAgent)
        logger.debug('streams: [%s]', ','.join(map(str, self.streams)))
        if not self.peerValidityChecks():
            # ABORT afterwards
            return True
        self.append_write_buf(protocol.CreatePacket('verack'))
        self.verackSent = True
        if not self.isOutbound:
            self.append_write_buf(protocol.assembleVersionMessage(
                self.destination.host, self.destination.port,
                connectionpool.BMConnectionPool().streams, True,
                nodeid=self.nodeid))
            logger.debug(
                '%(host)s:%(port)i sending version',
                self.destination._asdict())
        if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL)
                and protocol.haveSSL(not self.isOutbound)):
            self.isSSL = True
        if not self.verackReceived:
            return True
        self.set_state(
            "tls_init" if self.isSSL else "connection_fully_established",
            length=self.payloadLength, expectBytes=0)
        return False

    # pylint: disable=too-many-return-statements
    def peerValidityChecks(self):
        """Check the validity of the peer"""
        if self.remoteProtocolVersion < 3:
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="Your node is using an old protocol."
                " Closing connection.",
                fatal=2))
            logger.debug(
                'Closing connection to old protocol version %s, node: %s',
                self.remoteProtocolVersion, self.destination)
            return False
        if self.timeOffset > MAX_TIME_OFFSET:
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="Your time is too far in the future"
                " compared to mine. Closing connection.", fatal=2))
            logger.info(
                "%s's time is too far in the future (%s seconds)."
                " Closing connection to it.", self.destination, self.timeOffset)
            BMProto.timeOffsetWrongCount += 1
            return False
        elif self.timeOffset < -MAX_TIME_OFFSET:
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="Your time is too far in the past compared to mine."
                " Closing connection.", fatal=2))
            logger.info(
                "%s's time is too far in the past (timeOffset %s seconds)."
                " Closing connection to it.", self.destination, self.timeOffset)
            BMProto.timeOffsetWrongCount += 1
            return False
        else:
            BMProto.timeOffsetWrongCount = 0
        if not self.streams:
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="We don't have shared stream interests."
                " Closing connection.", fatal=2))
            logger.debug(
                'Closed connection to %s because there is no overlapping'
                ' interest in streams.', self.destination)
            return False
        if self.destination in connectionpool.BMConnectionPool().inboundConnections:
            try:
                if not protocol.checkSocksIP(self.destination.host):
                    self.append_write_buf(protocol.assembleErrorMessage(
                        errorText="Too many connections from your IP."
                        " Closing connection.", fatal=2))
                    logger.debug(
                        'Closed connection to %s because we are already'
                        ' connected to that IP.', self.destination)
                    return False
            except Exception:
                pass
        if not self.isOutbound:
            # incoming from a peer we're connected to as outbound,
            # or server full: report the same error to counter deanonymisation
            if (
                Peer(self.destination.host, self.peerNode.port)
                in connectionpool.BMConnectionPool().inboundConnections
                or len(connectionpool.BMConnectionPool().inboundConnections)
                + len(connectionpool.BMConnectionPool().outboundConnections)
                > BMConfigParser().safeGetInt(
                    'bitmessagesettings', 'maxtotalconnections')
                + BMConfigParser().safeGetInt(
                    'bitmessagesettings', 'maxbootstrapconnections')
            ):
                self.append_write_buf(protocol.assembleErrorMessage(
                    errorText="Server full, please try again later.", fatal=2))
                logger.debug(
                    'Closed connection to %s due to server full'
                    ' or duplicate inbound/outbound.', self.destination)
                return False
        if connectionpool.BMConnectionPool().isAlreadyConnected(
                self.nonce):
            self.append_write_buf(protocol.assembleErrorMessage(
                errorText="I'm connected to myself. Closing connection.",
                fatal=2))
            logger.debug(
                "Closed connection to %s because I'm connected to myself.",
                self.destination)
            return False
        return True

    @staticmethod
    def stopDownloadingObject(hashId, forwardAnyway=False):
        """Stop downloading an object"""
        for connection in connectionpool.BMConnectionPool().connections():
            try:
                del connection.objectsNewToMe[hashId]
            except KeyError:
                pass
            if not forwardAnyway:
                try:
                    with connection.objectsNewToThemLock:
                        del connection.objectsNewToThem[hashId]
                except KeyError:
                    pass
        try:
            del missingObjects[hashId]
        except KeyError:
            pass

    def handle_close(self):
        """Handle close"""
        self.set_state("close")
        if not (self.accepting or self.connecting or self.connected):
            # already disconnected
            return
        try:
            logger.debug(
                '%s:%i: closing, %s', self.destination.host,
                self.destination.port, self.close_reason)
        except AttributeError:
            try:
                logger.debug(
                    '%(host)s:%(port)i: closing', self.destination._asdict())
            except AttributeError:
                logger.debug('Disconnected socket closing')
        AdvancedDispatcher.handle_close(self)


class BMStringParser(BMProto):
    """
    A special case of BMProto used by objectProcessor to send ACK
    """
    def __init__(self):
        super(BMStringParser, self).__init__()
        self.destination = Peer('127.0.0.1', 8444)
        self.payload = None
        ObjectTracker.__init__(self)

    def send_data(self, data):
        """Send object given by the data string"""
        # This class is introduced specially for ACK sending, please
        # change log strings if you are going to use it for something else
        self.bm_proto_reset()
        self.payload = data
        try:
            self.bm_command_object()
        except BMObjectAlreadyHaveError:
            pass  # maybe the same msg received on different nodes
        except BMObjectExpiredError:
            logger.debug(
                'Sending ACK failure (expired): %s', hexlify(data))
        except Exception as e:
            logger.debug(
                'Exception of type %s while sending ACK',
                type(e), exc_info=True)
@@ -1,77 +0,0 @@
"""
Select which node to connect to
"""
# pylint: disable=too-many-branches, logging-format-interpolation, unidiomatic-typecheck
import logging
import random  # nosec

import knownnodes
import protocol
import state
from bmconfigparser import BMConfigParser
from queues import Queue, portCheckerQueue

logger = logging.getLogger('default')


def getDiscoveredPeer():
    """Get a peer from the local peer discovery list"""
    try:
        peer = random.choice([key for key in state.discoveredPeers.keys()])
    except (IndexError, KeyError):
        raise ValueError
    try:
        del state.discoveredPeers[peer]
    except KeyError:
        pass
    return peer


def chooseConnection(stream):
    """Returns an appropriate connection"""
    haveOnion = BMConfigParser().safeGet(
        "bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS'
    onionOnly = BMConfigParser().safeGetBoolean(
        "bitmessagesettings", "onionservicesonly")
    try:
        retval = portCheckerQueue.get(False)
        portCheckerQueue.task_done()
        return retval
    except Queue.Empty:
        pass
    # with a probability of 0.5, connect to a discovered peer
    if random.choice((False, True)) and not haveOnion:
        # discovered peers are already filtered by allowed streams
        return getDiscoveredPeer()
    for _ in range(50):
        peer = random.choice([key for key in knownnodes.knownNodes[stream].keys()])
        try:
            peer_info = knownnodes.knownNodes[stream][peer]
            if peer_info.get('self'):
                continue
            rating = peer_info["rating"]
        except TypeError:
            logger.warning('Error in {}'.format(peer))
            rating = 0
        if haveOnion:
            # do not connect to raw IP addresses --
            # keep all traffic within the Tor overlay
            if onionOnly and not peer.host.endswith('.onion'):
                continue
            # onion addresses have a higher priority when SOCKS
            if peer.host.endswith('.onion') and rating > 0:
                rating = 1
            # TODO: need better check
            elif not peer.host.startswith('bootstrap'):
                encodedAddr = protocol.encodeHost(peer.host)
                # don't connect to local IPs when using SOCKS
                if not protocol.checkIPAddress(encodedAddr, False):
                    continue
        if rating > 1:
            rating = 1
        try:
            if 0.05 / (1.0 - rating) > random.random():
                return peer
        except ZeroDivisionError:
            return peer
    raise ValueError
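
# The acceptance test 0.05 / (1.0 - rating) > random.random() makes
# better-rated nodes much more likely to be picked: a rating of 0 gives a 5%
# chance per draw, 0.5 gives 10%, 0.9 gives 50%, and a rating of exactly 1
# divides by zero and is therefore always accepted. A quick check of those
# numbers (illustrative only):
#
#     for rating in (0.0, 0.5, 0.9):
#         print(rating, 0.05 / (1.0 - rating))  # ~0.05, ~0.1, ~0.5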

@@ -1,419 +0,0 @@
"""
`BMConnectionPool` class definition
"""
import errno
import logging
import re
import socket
import sys
import time

import pybitmessage.network.asyncore_pollchoose as asyncore
from pybitmessage import helper_random
# import knownnodes
# import protocol
from pybitmessage import state
from pybitmessage.bmconfigparser import BMConfigParser
# from network.connectionchooser import chooseConnection
# from network.proxy import Proxy

# from network.tcp import (
#     TCPServer, Socks5BMConnection, Socks4aBMConnection, TCPConnection, bootstrap)
# from network.udp import UDPSocket
from pybitmessage.singleton import Singleton
# from .node import Peer

logger = logging.getLogger('default')


@Singleton
class BMConnectionPool(object):
    """Pool of all existing connections"""
    # pylint: disable=too-many-instance-attributes
    trustedPeer = None

    """
    If the trustedpeer option is specified in keys.dat then this will
    contain a Peer which will be connected to instead of using the
    addresses advertised by other peers.

    The expected use case is where the user has a trusted server where
    they run a Bitmessage daemon permanently. If they then run a second
    instance of the client on a local machine periodically when they want
    to check for messages it will sync with the network a lot faster
    without compromising security.
    """

    def __init__(self):
        asyncore.set_rates(
            BMConfigParser().safeGetInt(
                "bitmessagesettings", "maxdownloadrate"),
            BMConfigParser().safeGetInt(
                "bitmessagesettings", "maxuploadrate")
        )
        self.outboundConnections = {}
        self.inboundConnections = {}
        self.listeningSockets = {}
        self.udpSockets = {}
        self.streams = []
        self._lastSpawned = 0
        self._spawnWait = 2
        self._bootstrapped = False

        trustedPeer = BMConfigParser().safeGet(
            'bitmessagesettings', 'trustedpeer')
        try:
            if trustedPeer:
                host, port = trustedPeer.split(':')
                self.trustedPeer = Peer(host, int(port))
        except ValueError:
            sys.exit(
                'Bad trustedpeer config setting! It should be set as'
                ' trustedpeer=<hostname>:<portnumber>'
            )

    def connections(self):
        """
        Shortcut for combined list of connections from
        `inboundConnections` and `outboundConnections` dicts
        """
        return list(self.inboundConnections.values()) + \
            list(self.outboundConnections.values())

    def establishedConnections(self):
        """Shortcut for list of connections having fullyEstablished == True"""
        return [
            x for x in self.connections() if x.fullyEstablished]

    def connectToStream(self, streamNumber):
        """Connect to a bitmessage stream"""
        self.streams.append(streamNumber)
        state.streamsInWhichIAmParticipating.append(streamNumber)

    def getConnectionByAddr(self, addr):
        """
        Return an (existing) connection object based on a `Peer` object
        (IP and port)
        """
        try:
            return self.inboundConnections[addr]
        except KeyError:
            pass
        try:
            return self.inboundConnections[addr.host]
        except (KeyError, AttributeError):
            pass
        try:
            return self.outboundConnections[addr]
        except KeyError:
            pass
        try:
            return self.udpSockets[addr.host]
        except (KeyError, AttributeError):
            pass
        raise KeyError

    def isAlreadyConnected(self, nodeid):
        """Check if we're already connected to this peer"""
        for i in self.connections():
            try:
                if nodeid == i.nodeid:
                    return True
            except AttributeError:
                pass
        return False

    def addConnection(self, connection):
        """Add a connection object to our internal dict"""
        if isinstance(connection, UDPSocket):
            return
        if connection.isOutbound:
            self.outboundConnections[connection.destination] = connection
        else:
            if connection.destination.host in self.inboundConnections:
                self.inboundConnections[connection.destination] = connection
            else:
                self.inboundConnections[connection.destination.host] = \
                    connection

    # def removeConnection(self, connection):
    #     """Remove a connection from our internal dict"""
    #     if isinstance(connection, UDPSocket):
    #         del self.udpSockets[connection.listening.host]
    #     elif isinstance(connection, TCPServer):
    #         del self.listeningSockets[Peer(
    #             connection.destination.host, connection.destination.port)]
    #     elif connection.isOutbound:
    #         try:
    #             del self.outboundConnections[connection.destination]
    #         except KeyError:
    #             pass
    #     else:
    #         try:
    #             del self.inboundConnections[connection.destination]
    #         except KeyError:
    #             try:
    #                 del self.inboundConnections[connection.destination.host]
    #             except KeyError:
    #                 pass
    #     connection.handle_close()

    @staticmethod
    def getListeningIP():
        """What IP are we supposed to be listening on?"""
        if BMConfigParser().safeGet(
                "bitmessagesettings", "onionhostname").endswith(".onion"):
            host = BMConfigParser().safeGet(
                "bitmessagesettings", "onionbindip")
        else:
            host = '127.0.0.1'
        if (
            BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten")
            or BMConfigParser().safeGet("bitmessagesettings", "socksproxytype")
            == "none"
        ):
            # python doesn't like bind + INADDR_ANY?
            # host = socket.INADDR_ANY
            host = BMConfigParser().get("network", "bind")
        return host

    def startListening(self, bind=None):
        """Open a listening socket and start accepting connections on it"""
        if bind is None:
            bind = self.getListeningIP()
        port = BMConfigParser().safeGetInt("bitmessagesettings", "port")
        # correct port even if it changed
        ls = TCPServer(host=bind, port=port)
        self.listeningSockets[ls.destination] = ls

    def startUDPSocket(self, bind=None):
        """
        Open an UDP socket. Depending on settings, it can either only
        accept incoming UDP packets, or also be able to send them.
        """
        if bind is None:
            host = self.getListeningIP()
            udpSocket = UDPSocket(host=host, announcing=True)
        else:
            if bind is False:
                udpSocket = UDPSocket(announcing=False)
            else:
                udpSocket = UDPSocket(host=bind, announcing=True)
        self.udpSockets[udpSocket.listening.host] = udpSocket

    def startBootstrappers(self):
        """Run the process of resolving bootstrap hostnames"""
        proxy_type = BMConfigParser().safeGet(
            'bitmessagesettings', 'socksproxytype')
        # Plugins may be added here
        hostname = None
        if not proxy_type or proxy_type == 'none':
            connection_base = TCPConnection
        elif proxy_type == 'SOCKS5':
            connection_base = Socks5BMConnection
            hostname = helper_random.randomchoice([
                'quzwelsuziwqgpt2.onion', None
            ])
        elif proxy_type == 'SOCKS4a':
            connection_base = Socks4aBMConnection  # FIXME: I cannot test
        else:
            # This should never happen because socksproxytype setting
            # is handled in bitmessagemain before starting the connectionpool
            return
        bootstrapper = bootstrap(connection_base)
        if not hostname:
            port = helper_random.randomchoice([8080, 8444])
            hostname = 'bootstrap{}.bitmessage.org'.format(port)
        else:
            port = 8444
        self.addConnection(bootstrapper(hostname, port))

    def loop(self):  # pylint: disable=too-many-branches,too-many-statements
        """Main Connectionpool's loop"""
        # pylint: disable=too-many-locals
        # defaults to empty loop if outbound connections are maxed
        spawnConnections = False
        acceptConnections = True
        if BMConfigParser().safeGetBoolean(
                'bitmessagesettings', 'dontconnect'):
            acceptConnections = False
        elif bool(BMConfigParser().safeGet(
                'bitmessagesettings', 'sendoutgoingconnections')):
            spawnConnections = True
        socksproxytype = BMConfigParser().safeGet(
            'bitmessagesettings', 'socksproxytype', '')
        onionsocksproxytype = BMConfigParser().safeGet(
            'bitmessagesettings', 'onionsocksproxytype', '')
        if (
            socksproxytype[:5] == 'SOCKS'
            and not BMConfigParser().safeGetBoolean(
                'bitmessagesettings', 'sockslisten')
            and '.onion' not in BMConfigParser().safeGet(
                'bitmessagesettings', 'onionhostname', '')
        ):
            acceptConnections = False

        # pylint: disable=too-many-nested-blocks
        # if spawnConnections:
        #     if not knownnodes.knownNodesActual:
        #         self.startBootstrappers()
        #         knownnodes.knownNodesActual = True
        #     if not self._bootstrapped:
        #         self._bootstrapped = True
        #         Proxy.proxy = (
        #             BMConfigParser().safeGet(
        #                 'bitmessagesettings', 'sockshostname'),
        #             BMConfigParser().safeGetInt(
        #                 'bitmessagesettings', 'socksport')
        #         )
        #         # TODO AUTH
        #         # TODO reset based on GUI settings changes
        #         try:
        #             if not onionsocksproxytype.startswith("SOCKS"):
        #                 raise ValueError
        #             Proxy.onion_proxy = (
        #                 BMConfigParser().safeGet(
        #                     'network', 'onionsockshostname', None),
        #                 BMConfigParser().safeGet(
        #                     'network', 'onionsocksport', None)
        #             )
        #         except ValueError:
        #             Proxy.onion_proxy = None
        #     established = sum(
        #         1 for c in self.outboundConnections.values()
        #         if (c.connected and c.fullyEstablished))
        #     pending = len(self.outboundConnections) - established
        #     if established < BMConfigParser().safeGetInt(
        #             'bitmessagesettings', 'maxoutboundconnections'):
        #         for i in range(
        #                 state.maximumNumberOfHalfOpenConnections - pending):
        #             try:
        #                 chosen = self.trustedPeer or chooseConnection(
        #                     helper_random.randomchoice(self.streams))
        #             except ValueError:
        #                 continue
        #             if chosen in self.outboundConnections:
        #                 continue
        #             if chosen.host in self.inboundConnections:
        #                 continue
        #             # don't connect to self
        #             if chosen in state.ownAddresses:
        #                 continue
        #             # don't connect to the hosts from the same
        #             # network group, defense against sibyl attacks
        #             host_network_group = protocol.network_group(
        #                 chosen.host)
        #             same_group = False
        #             for j in self.outboundConnections.values():
        #                 if host_network_group == j.network_group:
        #                     same_group = True
        #                     if chosen.host == j.destination.host:
        #                         knownnodes.decreaseRating(chosen)
        #                     break
        #             if same_group:
        #                 continue
        #
        #             try:
        #                 # pylint: disable=unidiomatic-typecheck
        #                 if type(chosen.host) == bytes:
        #                     onion = '.onion'.encode()
        #                 else:
        #                     onion = '.onion'
        #                 if chosen.host.endswith(onion) and Proxy.onion_proxy:
        #                     if onionsocksproxytype == "SOCKS5":
        #                         self.addConnection(Socks5BMConnection(chosen))
        #                     elif onionsocksproxytype == "SOCKS4a":
        #                         self.addConnection(Socks4aBMConnection(chosen))
        #                 elif socksproxytype == "SOCKS5":
        #                     self.addConnection(Socks5BMConnection(chosen))
        #                 elif socksproxytype == "SOCKS4a":
        #                     self.addConnection(Socks4aBMConnection(chosen))
        #                 else:
        #                     self.addConnection(TCPConnection(chosen))
        #             except socket.error as e:
        #                 if e.errno == errno.ENETUNREACH:
        #                     continue
        #             self._lastSpawned = time.time()
        # else:
        #     for i in self.connections():
        #         # FIXME: rating will be increased after next connection
        #         i.handle_close()

        if acceptConnections:
            if not self.listeningSockets:
                if BMConfigParser().safeGet('network', 'bind') == '':
                    self.startListening()
                else:
                    for bind in re.sub(
                            r'[^\w.]+', ' ',
                            BMConfigParser().safeGet('network', 'bind')
                    ).split():
                        self.startListening(bind)
                logger.info('Listening for incoming connections.')
            if False:
                if BMConfigParser().safeGet('network', 'bind') == '':
                    self.startUDPSocket()
                else:
                    for bind in re.sub(
                            r'[^\w.]+', ' ',
                            BMConfigParser().safeGet('network', 'bind')
                    ).split():
                        self.startUDPSocket(bind)
                    self.startUDPSocket(False)
                logger.info('Starting UDP socket(s).')
        else:
            if self.listeningSockets:
                for i in self.listeningSockets.values():
                    i.close_reason = "Stopping listening"
                    i.accepting = i.connecting = i.connected = False
                logger.info('Stopped listening for incoming connections.')
            if self.udpSockets:
                for i in self.udpSockets.values():
                    i.close_reason = "Stopping UDP socket"
                    i.accepting = i.connecting = i.connected = False
                logger.info('Stopped udp sockets.')

        loopTime = float(self._spawnWait)
        if self._lastSpawned < time.time() - self._spawnWait:
            loopTime = 2.0
        asyncore.loop(timeout=loopTime, count=1000)

        reaper = []

        # for i in self.connections():
        #     minTx = time.time() - 20
        #     if i.fullyEstablished:
        #         minTx -= 300 - 20
        #     if i.lastTx < minTx:
        #         if i.fullyEstablished:
        #             i.append_write_buf(protocol.CreatePacket('ping'))
        #         else:
        #             i.close_reason = "Timeout (%is)" % (
        #                 time.time() - i.lastTx)
        #             i.set_state("close")
        for i in (
            self.connections()
            + list(self.listeningSockets.values())
            + list(self.udpSockets.values())
        ):
            if not (i.accepting or i.connecting or i.connected):
                reaper.append(i)
            else:
                try:
                    if i.state == "close":
                        reaper.append(i)
                except AttributeError:
                    pass
        # for i in reaper:
        #     self.removeConnection(i)

@@ -1,17 +0,0 @@
"""
Network protocol constants
"""


#: address is online if online less than this many seconds ago
ADDRESS_ALIVE = 10800
#: protocol specification says max 1000 addresses in one addr command
MAX_ADDR_COUNT = 1000
#: ~1.6 MB which is the maximum possible size of an inv message.
MAX_MESSAGE_SIZE = 1600100
#: 2**18 = 256kB is the maximum size of an object payload
MAX_OBJECT_PAYLOAD_SIZE = 2**18
#: protocol specification says max 50000 objects in one inv command
MAX_OBJECT_COUNT = 50000
#: maximum time offset
MAX_TIME_OFFSET = 3600

@@ -1,204 +0,0 @@
"""
Dandelion class definition, tracks stages
"""
import logging
from collections import namedtuple
from random import choice, expovariate, sample
from threading import RLock
from time import time

from network import connectionpool
import state
from queues import invQueue
from singleton import Singleton

# randomise routes after 600 seconds
REASSIGN_INTERVAL = 600

# trigger fluff due to expiration
FLUFF_TRIGGER_FIXED_DELAY = 10
FLUFF_TRIGGER_MEAN_DELAY = 30

MAX_STEMS = 2

Stem = namedtuple('Stem', ['child', 'stream', 'timeout'])

logger = logging.getLogger('default')


@Singleton
class Dandelion(object):
    """Dandelion class for tracking stem/fluff stages."""
    def __init__(self):
        # currently assignable child stems
        self.stem = []
        # currently assigned parent <-> child mappings
        self.nodeMap = {}
        # currently existing objects in stem mode
        self.hashMap = {}
        # when to rerandomise routes
        self.refresh = time() + REASSIGN_INTERVAL
        self.lock = RLock()

    @staticmethod
    def poissonTimeout(start=None, average=0):
        """Generate deadline using Poisson distribution"""
        if start is None:
            start = time()
        if average == 0:
            average = FLUFF_TRIGGER_MEAN_DELAY
        return start + expovariate(1.0 / average) + FLUFF_TRIGGER_FIXED_DELAY
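
    # The fluff deadline is start + Exp(1/average) + fixed delay, so with the
    # defaults above (mean 30 s, fixed 10 s) an object lingers in stem mode
    # for about 40 s on average before being fluffed. A rough way to confirm
    # that expectation (illustrative only):
    #
    #     from random import expovariate
    #     samples = [expovariate(1.0 / 30) + 10 for _ in range(100000)]
    #     print(sum(samples) / len(samples))  # ~40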

    def addHash(self, hashId, source=None, stream=1):
        """Add inventory vector to dandelion stem"""
        if not state.dandelion:
            return
        with self.lock:
            self.hashMap[hashId] = Stem(
                self.getNodeStem(source),
                stream,
                self.poissonTimeout())

    def setHashStream(self, hashId, stream=1):
        """
        Update stream for inventory vector (as inv/dinv commands don't
        include streams, we only learn this after receiving the object)
        """
        with self.lock:
            if hashId in self.hashMap:
                self.hashMap[hashId] = Stem(
                    self.hashMap[hashId].child,
                    stream,
                    self.poissonTimeout())

    def removeHash(self, hashId, reason="no reason specified"):
        """Switch inventory vector from stem to fluff mode"""
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(
                '%s entering fluff mode due to %s.',
                ''.join('%02x' % ord(i) for i in hashId), reason)
        with self.lock:
            try:
                del self.hashMap[hashId]
            except KeyError:
                pass

    def hasHash(self, hashId):
        """Is inventory vector in stem mode?"""
        return hashId in self.hashMap

    def objectChildStem(self, hashId):
        """Child (i.e. next) node for an inventory vector during stem mode"""
        return self.hashMap[hashId].child

    def maybeAddStem(self, connection):
        """
        If we had too few outbound connections, add the current one to the
        current stem list. Dandelion as designed by the authors should
        always have two active stem child connections.
        """
        # fewer than MAX_STEMS outbound connections at last reshuffle?
        with self.lock:
            if len(self.stem) < MAX_STEMS:
                self.stem.append(connection)
                for k in (k for k, v in iter(self.nodeMap.items()) if v is None):
                    self.nodeMap[k] = connection
                # re-assign pending hashes that have no child stem yet; the
                # AttributeError guard covers an empty or partly initialised
                # hashMap
                try:
                    for k, v in {
                        k: v for k, v in iter(self.hashMap.items())
                        if v.child is None
                    }.items():
                        self.hashMap[k] = Stem(
                            connection, v.stream, self.poissonTimeout())
                        invQueue.put((v.stream, k, v.child))
                except AttributeError:
                    pass

    def maybeRemoveStem(self, connection):
        """
        Remove current connection from the stem list (called e.g. when
        a connection is closed).
        """
        # is the stem active?
        with self.lock:
            if connection in self.stem:
                self.stem.remove(connection)
                # active mappings pointing to the removed node
                for k in (
                    k for k, v in iter(self.nodeMap.items()) if v == connection
                ):
                    self.nodeMap[k] = None
                for k, v in {
                    k: v for k, v in iter(self.hashMap.items())
                    if v.child == connection
                }.items():
                    self.hashMap[k] = Stem(
                        None, v.stream, self.poissonTimeout())

    def pickStem(self, parent=None):
        """
        Pick a random active stem, but not the parent one
        (the one where an object came from)
        """
        try:
            # pick a random one from the available stems
            stem = choice(range(len(self.stem)))
            if self.stem[stem] == parent:
                # one stem available and it's the parent
                if len(self.stem) == 1:
                    return None
                # else, pick the other one
                return self.stem[1 - stem]
            # all ok
            return self.stem[stem]
        except IndexError:
            # no stems available
            return None

    def getNodeStem(self, node=None):
        """
        Return child stem node for a given parent stem node
        (the mapping is static for about 10 minutes, then it reshuffles)
        """
        with self.lock:
            try:
                return self.nodeMap[node]
            except KeyError:
                self.nodeMap[node] = self.pickStem(node)
                return self.nodeMap[node]

    def expire(self):
        """Switch expired objects from stem to fluff mode"""
        with self.lock:
            deadline = time()
            toDelete = [
                [v.stream, k, v.child] for k, v in iter(self.hashMap.items())
                if v.timeout < deadline
            ]

            for row in toDelete:
                self.removeHash(row[1], 'expiration')
                invQueue.put(row)
        return toDelete

    def reRandomiseStems(self):
        """Re-shuffle stem mapping (parent <-> child pairs)"""
        with self.lock:
            try:
                # random two connections
                self.stem = sample(
                    list(connectionpool.BMConnectionPool(
                    ).outboundConnections.values()), MAX_STEMS)
            # not enough stems available
            except ValueError:
                self.stem = connectionpool.BMConnectionPool(
                ).outboundConnections.values()
            self.nodeMap = {}
            # hashMap stays to cater for pending stems
            self.refresh = time() + REASSIGN_INTERVAL
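
    # Putting the pieces together, an object's stem lifecycle looks roughly
    # like this (illustrative only; `hash_id` is a hypothetical 32-byte
    # inventory vector):
    #
    #     d = Dandelion()
    #     d.addHash(hash_id, source=None, stream=1)  # enter stem mode
    #     d.hasHash(hash_id)                         # True while stemming
    #     d.expire()                                 # past-deadline hashes fluff
    #     d.removeHash(hash_id, "reason")            # explicit switch to fluff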

@@ -1,84 +0,0 @@
"""
`DownloadThread` class definition
"""
import time

import addresses
import helper_random
import protocol
from network.dandelion import Dandelion
from inventory import Inventory
from network.connectionpool import BMConnectionPool
from network.objectracker import missingObjects
from network.threads import StoppableThread


class DownloadThread(StoppableThread):
    """Thread-based class for downloading from connections"""
    minPending = 200
    maxRequestChunk = 1000
    requestTimeout = 60
    cleanInterval = 60
    requestExpires = 3600

    def __init__(self):
        super(DownloadThread, self).__init__(name="Downloader")
        self.lastCleaned = time.time()

    def cleanPending(self):
        """Expire pending downloads eventually"""
        deadline = time.time() - self.requestExpires
        try:
            toDelete = [k for k, v in iter(missingObjects.items()) if v < deadline]
        except RuntimeError:
            pass
        else:
            for i in toDelete:
                del missingObjects[i]
            self.lastCleaned = time.time()

    def run(self):  # pylint: disable=protected-access
        while not self._stopped:
            requested = 0
            connections = BMConnectionPool().establishedConnections()
            helper_random.randomshuffle(connections)
            requestChunk = max(int(
                min(self.maxRequestChunk, len(missingObjects))
                / len(connections)), 1) if connections else 1

            for i in connections:
                now = time.time()
                # avoid unnecessary delay
                if i.skipUntil >= now:
                    continue
                try:
                    request = i.objectsNewToMe.randomKeys(requestChunk)
                except KeyError:
                    continue
                payload = bytearray()
                chunkCount = 0
                for chunk in request:
                    if chunk in Inventory() and not Dandelion().hasHash(chunk):
                        try:
                            del i.objectsNewToMe[chunk]
                        except KeyError:
                            pass
                        continue
                    payload.extend(chunk)
                    chunkCount += 1
                    missingObjects[chunk] = now
                if not chunkCount:
                    continue
                payload[0:0] = addresses.encodeVarint(chunkCount)
                i.append_write_buf(protocol.CreatePacket('getdata', payload))
                self.logger.debug(
                    '%s:%i Requesting %i objects',
                    i.destination.host, i.destination.port, chunkCount)
                requested += chunkCount
            if time.time() >= self.lastCleaned + self.cleanInterval:
                self.cleanPending()
            if not requested:
                self.stop.wait(1)
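
    # The per-connection request size divides the backlog evenly, capped at
    # maxRequestChunk. For example, with 4000 missing objects spread over 8
    # established connections: min(1000, 4000) / 8 = 125 hashes requested per
    # connection per pass; with only 8 missing objects it degrades gracefully
    # to max(int(8 / 8), 1) = 1.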

@@ -1,90 +0,0 @@
# pylint: disable=redefined-outer-name, too-many-ancestors, missing-docstring
import socket

from advanceddispatcher import AdvancedDispatcher
import asyncore_pollchoose as asyncore
from network.proxy import ProxyError
from socks5 import Socks5Connection, Socks5Resolver
from socks4a import Socks4aConnection, Socks4aResolver


class HttpError(ProxyError):
    pass


class HttpConnection(AdvancedDispatcher):
    def __init__(self, host, path="/"):
        AdvancedDispatcher.__init__(self)
        self.path = path
        self.destination = (host, 80)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(self.destination)
        print("connecting in background to %s:%i" % (self.destination[0], self.destination[1]))

    def state_init(self):
        self.append_write_buf(
            "GET %s HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n" % (
                self.path, self.destination[0]))
        print("Sending %ib" % len(self.write_buf))
        self.set_state("http_request_sent", 0)
        return False

    def state_http_request_sent(self):
        if self.read_buf:
            print("Received %ib" % len(self.read_buf))
            self.read_buf = b""
        if not self.connected:
            self.set_state("close", 0)
        return False


class Socks5HttpConnection(Socks5Connection, HttpConnection):
    def __init__(self, host, path="/"):  # pylint: disable=super-init-not-called
        self.path = path
        Socks5Connection.__init__(self, address=(host, 80))

    def state_socks_handshake_done(self):
        HttpConnection.state_init(self)
        return False


class Socks4aHttpConnection(Socks4aConnection, HttpConnection):
    def __init__(self, host, path="/"):  # pylint: disable=super-init-not-called
        Socks4aConnection.__init__(self, address=(host, 80))
        self.path = path

    def state_socks_handshake_done(self):
        HttpConnection.state_init(self)
        return False


if __name__ == "__main__":
    # initial fill
    for host in ("bootstrap8080.bitmessage.org", "bootstrap8444.bitmessage.org"):
        proxy = Socks5Resolver(host=host)
        while asyncore.socket_map:
            print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map)))
            asyncore.loop(timeout=1, count=1)
        proxy.resolved()

        proxy = Socks4aResolver(host=host)
        while asyncore.socket_map:
            print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map)))
            asyncore.loop(timeout=1, count=1)
        proxy.resolved()

    for host in ("bitmessage.org",):
        direct = HttpConnection(host)
        while asyncore.socket_map:
            # print "loop, state = %s" % direct.state
            asyncore.loop(timeout=1, count=1)

        proxy = Socks5HttpConnection(host)
        while asyncore.socket_map:
            # print "loop, state = %s" % proxy.state
            asyncore.loop(timeout=1, count=1)

        proxy = Socks4aHttpConnection(host)
        while asyncore.socket_map:
            # print "loop, state = %s" % proxy.state
            asyncore.loop(timeout=1, count=1)

@@ -1,55 +0,0 @@
"""
src/network/http_old.py
"""
import asyncore
import socket
import time

requestCount = 0
parallel = 50
duration = 60


class HTTPClient(asyncore.dispatcher):
    """An asyncore dispatcher"""
    port = 12345

    def __init__(self, host, path, connect=True):
        if not hasattr(self, '_map'):
            asyncore.dispatcher.__init__(self)
        if connect:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.connect((host, HTTPClient.port))
        self.buffer = 'GET %s HTTP/1.0\r\n\r\n' % path

    def handle_close(self):
        # pylint: disable=global-statement
        global requestCount
        requestCount += 1
        self.close()

    def handle_read(self):
        # print self.recv(8192)
        self.recv(8192)

    def writable(self):
        return len(self.buffer) > 0

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]


if __name__ == "__main__":
    # initial fill
    for i in range(parallel):
        HTTPClient('127.0.0.1', '/')
    start = time.time()
    while time.time() - start < duration:
        if len(asyncore.socket_map) < parallel:
            for i in range(parallel - len(asyncore.socket_map)):
                HTTPClient('127.0.0.1', '/')
        print("Active connections: %i" % len(asyncore.socket_map))
        asyncore.loop(count=len(asyncore.socket_map) / 2)
        if requestCount % 100 == 0:
            print("Processed %i total messages" % requestCount)

@@ -1,155 +0,0 @@
"""
src/network/httpd.py
=======================
"""
import asyncore
import socket

from .tls import TLSHandshake


class HTTPRequestHandler(asyncore.dispatcher):
    """Handling HTTP request"""
    response = """HTTP/1.0 200 OK\r
Date: Sun, 23 Oct 2016 18:02:00 GMT\r
Content-Type: text/html; charset=UTF-8\r
Content-Encoding: UTF-8\r
Content-Length: 136\r
Last-Modified: Wed, 08 Jan 2003 23:11:55 GMT\r
Server: Apache/1.3.3.7 (Unix) (Red-Hat/Linux)\r
ETag: "3f80f-1b6-3e1cb03b"\r
Accept-Ranges: bytes\r
Connection: close\r
\r
<html>
<head>
<title>An Example Page</title>
</head>
<body>
Hello World, this is a very simple HTML document.
</body>
</html>"""

    def __init__(self, sock):
        if not hasattr(self, '_map'):
            asyncore.dispatcher.__init__(self, sock)
        self.inbuf = ""
        self.ready = True
        self.busy = False
        self.respos = 0

    def handle_close(self):
        self.close()

    def readable(self):
        return self.ready

    def writable(self):
        return self.busy

    def handle_read(self):
        self.inbuf += self.recv(8192)
        if self.inbuf[-4:] == "\r\n\r\n":
            self.busy = True
            self.ready = False
            self.inbuf = ""
        elif self.inbuf == "":
            pass

    def handle_write(self):
        if self.busy and self.respos < len(HTTPRequestHandler.response):
            written = 0
            written = self.send(HTTPRequestHandler.response[self.respos:65536])
            self.respos += written
        elif self.busy:
            self.busy = False
            self.ready = True
            self.close()


class HTTPSRequestHandler(HTTPRequestHandler, TLSHandshake):
    """Handling HTTPS request"""
    def __init__(self, sock):
        if not hasattr(self, '_map'):
            asyncore.dispatcher.__init__(self, sock)  # pylint: disable=non-parent-init-called
        # self.tlsDone = False
        TLSHandshake.__init__(
            self,
            sock=sock,
            certfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/cert.pem',
            keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem',
            server_side=True)
        HTTPRequestHandler.__init__(self, sock)

    def handle_connect(self):
        TLSHandshake.handle_connect(self)

    def handle_close(self):
        if self.tlsDone:
            HTTPRequestHandler.close(self)
        else:
            TLSHandshake.close(self)

    def readable(self):
        if self.tlsDone:
            return HTTPRequestHandler.readable(self)
        return TLSHandshake.readable(self)

    def handle_read(self):
        if self.tlsDone:
            HTTPRequestHandler.handle_read(self)
        else:
            TLSHandshake.handle_read(self)

    def writable(self):
        if self.tlsDone:
            return HTTPRequestHandler.writable(self)
        return TLSHandshake.writable(self)

    def handle_write(self):
        if self.tlsDone:
            HTTPRequestHandler.handle_write(self)
        else:
            TLSHandshake.handle_write(self)


class HTTPServer(asyncore.dispatcher):
    """Handling HTTP Server"""
    port = 12345

    def __init__(self):
        if not hasattr(self, '_map'):
            asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind(('127.0.0.1', HTTPServer.port))
        self.connections = 0
        self.listen(5)

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            sock, _ = pair
            self.connections += 1
            HTTPRequestHandler(sock)


class HTTPSServer(HTTPServer):
    """Handling HTTPS Server"""
    port = 12345

    def __init__(self):
        if not hasattr(self, '_map'):
            HTTPServer.__init__(self)

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            sock, _ = pair
            self.connections += 1
            HTTPSRequestHandler(sock)


if __name__ == "__main__":
    client = HTTPSServer()
    asyncore.loop()

@@ -1,72 +0,0 @@
# pylint: disable=missing-docstring
import asyncore

from .http import HTTPClient
from .tls import TLSHandshake

"""
self.sslSock = ssl.wrap_socket(
    self.sock,
    keyfile=os.path.join(paths.codePath(), 'sslkeys', 'key.pem'),
    certfile=os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'),
    server_side=not self.initiatedConnection,
    ssl_version=ssl.PROTOCOL_TLSv1,
    do_handshake_on_connect=False,
    ciphers='AECDH-AES256-SHA')
"""


class HTTPSClient(HTTPClient, TLSHandshake):
    def __init__(self, host, path):
        # pylint: disable=non-parent-init-called
        if not hasattr(self, '_map'):
            asyncore.dispatcher.__init__(self)
        self.tlsDone = False
        """
        TLSHandshake.__init__(
            self,
            address=(host, 443),
            certfile='/home/shurdeek/src/PyBitmessage/sslsrc/keys/cert.pem',
            keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem',
            server_side=False,
            ciphers='AECDH-AES256-SHA')
        """
        HTTPClient.__init__(self, host, path, connect=False)
        TLSHandshake.__init__(self, address=(host, 443), server_side=False)

    def handle_connect(self):
        TLSHandshake.handle_connect(self)

    def handle_close(self):
        if self.tlsDone:
            HTTPClient.close(self)
        else:
            TLSHandshake.close(self)

    def readable(self):
        if self.tlsDone:
            return HTTPClient.readable(self)
        else:
            return TLSHandshake.readable(self)

    def handle_read(self):
        if self.tlsDone:
            HTTPClient.handle_read(self)
        else:
            TLSHandshake.handle_read(self)

    def writable(self):
        if self.tlsDone:
            return HTTPClient.writable(self)
        else:
            return TLSHandshake.writable(self)

    def handle_write(self):
        if self.tlsDone:
            HTTPClient.handle_write(self)
        else:
            TLSHandshake.handle_write(self)


if __name__ == "__main__":
    client = HTTPSClient('anarchy.economicsofbitcoin.com', '/')
    asyncore.loop()
|
@ -1,110 +0,0 @@
"""
Thread to send inv announcements
"""
import queue as Queue
import random
from time import time

import addresses
import protocol
import state
from network.connectionpool import BMConnectionPool
from network.dandelion import Dandelion
from queues import invQueue
from network.threads import StoppableThread


def handleExpiredDandelion(expired):
    """For expired dandelion objects, mark all remotes as not having
    the object"""
    if not expired:
        return
    for i in BMConnectionPool().connections():
        if not i.fullyEstablished:
            continue
        for x in expired:
            streamNumber, hashid, _ = x
            try:
                del i.objectsNewToMe[hashid]
            except KeyError:
                if streamNumber in i.streams:
                    with i.objectsNewToThemLock:
                        i.objectsNewToThem[hashid] = time()


class InvThread(StoppableThread):
    """Main thread that sends inv announcements"""

    name = "InvBroadcaster"

    @staticmethod
    def handleLocallyGenerated(stream, hashId):
        """Locally generated inventory items require special handling"""
        Dandelion().addHash(hashId, stream=stream)
        for connection in BMConnectionPool().connections():
            if state.dandelion and connection != \
                    Dandelion().objectChildStem(hashId):
                continue
            connection.objectsNewToThem[hashId] = time()

    def run(self):  # pylint: disable=too-many-branches
        while not state.shutdown:  # pylint: disable=too-many-nested-blocks
            chunk = []
            while True:
                # Dandelion fluff trigger by expiration
                handleExpiredDandelion(Dandelion().expire())
                try:
                    data = invQueue.get(False)
                    chunk.append((data[0], data[1]))
                    # locally generated
                    if len(data) == 2 or data[2] is None:
                        self.handleLocallyGenerated(data[0], data[1])
                except Queue.Empty:
                    break

            if chunk:
                for connection in BMConnectionPool().connections():
                    fluffs = []
                    stems = []
                    for inv in chunk:
                        if inv[0] not in connection.streams:
                            continue
                        try:
                            with connection.objectsNewToThemLock:
                                del connection.objectsNewToThem[inv[1]]
                        except KeyError:
                            continue
                        try:
                            if connection == Dandelion().objectChildStem(inv[1]):
                                # Fluff trigger by RNG
                                # auto-ignore if config set to 0, i.e. dandelion is off
                                if random.randint(1, 100) >= state.dandelion:
                                    fluffs.append(inv[1])
                                # send a dinv only if the stem node supports dandelion
                                elif connection.services & protocol.NODE_DANDELION > 0:
                                    stems.append(inv[1])
                                else:
                                    fluffs.append(inv[1])
                        except KeyError:
                            fluffs.append(inv[1])
                    if fluffs:
                        random.shuffle(fluffs)
                        connection.append_write_buf(protocol.CreatePacket(
                            'inv',
                            addresses.encodeVarint(len(fluffs))
                            + b''.join(fluffs)))
                    if stems:
                        random.shuffle(stems)
                        connection.append_write_buf(protocol.CreatePacket(
                            'dinv',
                            addresses.encodeVarint(len(stems))
                            + b''.join(stems)))

            invQueue.iterate()
            for _ in range(len(chunk)):
                invQueue.task_done()

            if Dandelion().refresh < time():
                Dandelion().reRandomiseStems()

            self.stop.wait(1)
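Producers feed invQueue with (stream, hash) tuples; a third element that is absent or None marks the object as locally generated, which triggers the dandelion stem handling. A hedged sketch of the drain loop in isolation, using a plain queue.Queue as a stand-in for the project's invQueue:

import queue

invQueue = queue.Queue()  # stand-in for pybitmessage.queues.invQueue
invQueue.put((1, b'\x00' * 32))         # object announced by a remote
invQueue.put((1, b'\x01' * 32, None))   # locally generated (third item None)

chunk = []
while True:
    try:
        data = invQueue.get(False)
    except queue.Empty:
        break
    chunk.append((data[0], data[1]))
    locally_generated = len(data) == 2 or data[2] is None
    print(data[1][:4], "local" if locally_generated else "remote")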
@ -1,42 +0,0 @@
"""
A thread to handle network concerns
"""
import pybitmessage.network.asyncore_pollchoose as asyncore
from pybitmessage import state
from pybitmessage.network.connectionpool import BMConnectionPool
from pybitmessage.queues import excQueue
from pybitmessage.network.threads import StoppableThread


class BMNetworkThread(StoppableThread):
    """Main network thread"""
    name = "Asyncore"

    def run(self):
        try:
            while not self._stopped and state.shutdown == 0:
                BMConnectionPool().loop()
        except Exception as e:
            excQueue.put((self.name, e))
            raise

    def stopThread(self):
        super(BMNetworkThread, self).stopThread()
        for i in list(BMConnectionPool().listeningSockets.values()):
            try:
                i.close()
            except:
                pass
        for i in list(BMConnectionPool().outboundConnections.values()):
            try:
                i.close()
            except:
                pass
        for i in list(BMConnectionPool().inboundConnections.values()):
            try:
                i.close()
            except:
                pass

        # just in case
        asyncore.close_all()
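The thread polls a stop flag rather than being killed, so shutdown is cooperative. A simplified, self-contained sketch of that lifecycle (a stand-in for network.threads.StoppableThread, not the real class):

import threading


class StoppableThread(threading.Thread):  # simplified stand-in
    def __init__(self, name=None):
        super(StoppableThread, self).__init__(name=name)
        self.stop = threading.Event()
        self._stopped = False

    def stopThread(self):
        self._stopped = True
        self.stop.set()


class Worker(StoppableThread):
    def run(self):
        while not self._stopped:
            self.stop.wait(0.1)  # poll loop, as BMNetworkThread polls asyncore


w = Worker(name="Asyncore")
w.start()
w.stopThread()  # cooperative shutdown: the loop notices the flag and exits
w.join()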
@ -1,7 +0,0 @@
"""
Named tuples representing the network peers
"""
import collections

Peer = collections.namedtuple('Peer', ['host', 'port'])
Node = collections.namedtuple('Node', ['services', 'host', 'port'])
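Because these are namedtuples, peers compare and hash by value, which is what lets them serve as dictionary keys in structures like knownNodes. For example:

import collections

Peer = collections.namedtuple('Peer', ['host', 'port'])

a = Peer('127.0.0.1', 8444)
b = Peer('127.0.0.1', 8444)
assert a == b and hash(a) == hash(b)   # value semantics
lastseen = {a: 1234567890}             # usable as a dict key
print(lastseen[b], a.host, a.port)     # lookup via an equal tuple works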
@ -1,137 +0,0 @@
"""
Module for tracking objects
"""
import time
from threading import RLock

import network.connectionpool
from network.dandelion import Dandelion
from network.randomtrackingdict import RandomTrackingDict

haveBloom = False

try:
    # pybloomfiltermmap
    from pybloomfilter import BloomFilter
    haveBloom = True
except ImportError:
    try:
        # pybloom
        from pybloom import BloomFilter
        haveBloom = True
    except ImportError:
        pass

# it isn't actually implemented yet so no point in turning it on
haveBloom = False

# tracking pending downloads globally, for stats
missingObjects = {}


class ObjectTracker(object):
    """Object tracker mixin"""
    invCleanPeriod = 300
    invInitialCapacity = 50000
    invErrorRate = 0.03
    trackingExpires = 3600
    initialTimeOffset = 60

    def __init__(self):
        self.objectsNewToMe = RandomTrackingDict()
        self.objectsNewToThem = {}
        self.objectsNewToThemLock = RLock()
        self.initInvBloom()
        self.initAddrBloom()
        self.lastCleaned = time.time()

    def initInvBloom(self):
        """Init bloom filter for tracking. WIP."""
        if haveBloom:
            # lock?
            self.invBloom = BloomFilter(
                capacity=ObjectTracker.invInitialCapacity,
                error_rate=ObjectTracker.invErrorRate)

    def initAddrBloom(self):
        """Init bloom filter for tracking addrs, WIP.
        This either needs to be moved to addrthread.py or removed."""
        if haveBloom:
            # lock?
            self.addrBloom = BloomFilter(
                capacity=ObjectTracker.invInitialCapacity,
                error_rate=ObjectTracker.invErrorRate)

    def clean(self):
        """Clean up tracking to prevent memory bloat"""
        if self.lastCleaned < time.time() - ObjectTracker.invCleanPeriod:
            if haveBloom:
                # reinitialise the filters once nothing is pending
                if not missingObjects:
                    self.initInvBloom()
                    self.initAddrBloom()
            else:
                # release memory
                deadline = time.time() - ObjectTracker.trackingExpires
                with self.objectsNewToThemLock:
                    self.objectsNewToThem = {
                        k: v for k, v in self.objectsNewToThem.items()
                        if v >= deadline}
            self.lastCleaned = time.time()

    def hasObj(self, hashid):
        """Do we already have object?"""
        if haveBloom:
            return hashid in self.invBloom
        return hashid in self.objectsNewToMe

    def handleReceivedInventory(self, hashId):
        """Handling received inventory"""
        if haveBloom:
            self.invBloom.add(hashId)
        try:
            with self.objectsNewToThemLock:
                del self.objectsNewToThem[hashId]
        except KeyError:
            pass
        if hashId not in missingObjects:
            missingObjects[hashId] = time.time()
        self.objectsNewToMe[hashId] = True

    def handleReceivedObject(self, streamNumber, hashid):
        """Handling received object"""
        for i in network.connectionpool.BMConnectionPool().connections():
            if not i.fullyEstablished:
                continue
            try:
                del i.objectsNewToMe[hashid]
            except KeyError:
                if streamNumber in i.streams and (
                        not Dandelion().hasHash(hashid) or
                        Dandelion().objectChildStem(hashid) == i):
                    with i.objectsNewToThemLock:
                        i.objectsNewToThem[hashid] = time.time()
                    # update stream number,
                    # which we didn't have when we just received the dinv
                    # also resets expiration of the stem mode
                    Dandelion().setHashStream(hashid, streamNumber)

            if i == self:
                try:
                    with i.objectsNewToThemLock:
                        del i.objectsNewToThem[hashid]
                except KeyError:
                    pass
        self.objectsNewToMe.setLastObject()

    def hasAddr(self, addr):
        """WIP, should be moved to addrthread.py or removed"""
        if haveBloom:
            return addr in self.addrBloom
        return None

    def addAddr(self, hashid):
        """WIP, should be moved to addrthread.py or removed"""
        if haveBloom:
            self.addrBloom.add(hashid)
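clean() caps memory by dropping per-peer "new to them" entries whose timestamps fell behind a rolling deadline. A hedged, standalone sketch of that expiry step (sample data, not the project's state):

from time import time

trackingExpires = 3600
objectsNewToThem = {b'fresh-hash': time(),
                    b'stale-hash': time() - 7200}  # two hours old

deadline = time() - trackingExpires
objectsNewToThem = {
    k: v for k, v in objectsNewToThem.items() if v >= deadline}
print(list(objectsNewToThem))  # only b'fresh-hash' survives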
@ -1,151 +0,0 @@
"""
Set proxy if available, otherwise raise an exception
"""
# pylint: disable=protected-access
import logging
import socket
import time

import network.asyncore_pollchoose as asyncore
from network.advanceddispatcher import AdvancedDispatcher

from bmconfigparser import BMConfigParser
from .node import Peer

logger = logging.getLogger('default')


class ProxyError(Exception):
    """Base proxy exception class"""
    errorCodes = ("Unknown error",)

    def __init__(self, code=-1):
        self.code = code
        try:
            self.message = self.errorCodes[code]
        except IndexError:
            self.message = self.errorCodes[-1]
        super(ProxyError, self).__init__(self.message)


class GeneralProxyError(ProxyError):
    """General proxy error class (not specific to an implementation)"""
    errorCodes = (
        "Success",
        "Invalid data",
        "Not connected",
        "Not available",
        "Bad proxy type",
        "Bad input",
        "Timed out",
        "Network unreachable",
        "Connection refused",
        "Host unreachable"
    )


class Proxy(AdvancedDispatcher):
    """Base proxy class"""
    # these are global, and if you change config during runtime,
    # all active/new instances should change too
    _proxy = ("127.0.0.1", 9050)
    _auth = None
    _onion_proxy = None
    _onion_auth = None
    _remote_dns = True

    @property
    def proxy(self):
        """Return proxy IP and port"""
        return self.__class__._proxy

    @proxy.setter
    def proxy(self, address):
        """Set proxy IP and port"""
        if (not isinstance(address, tuple) or len(address) < 2 or
                not isinstance(address[0], str) or
                not isinstance(address[1], int)):
            raise ValueError
        self.__class__._proxy = address

    @property
    def auth(self):
        """Return proxy authentication settings"""
        return self.__class__._auth

    @auth.setter
    def auth(self, authTuple):
        """Set proxy authentication (username and password)"""
        self.__class__._auth = authTuple

    @property
    def onion_proxy(self):
        """
        Return separate proxy IP and port for use only with onion
        addresses. Untested.
        """
        return self.__class__._onion_proxy

    @onion_proxy.setter
    def onion_proxy(self, address):
        """Set onion proxy address"""
        if address is not None and (
                not isinstance(address, tuple) or len(address) < 2
                or not isinstance(address[0], str)
                or not isinstance(address[1], int)
        ):
            raise ValueError
        self.__class__._onion_proxy = address

    @property
    def onion_auth(self):
        """Return proxy authentication settings for onion hosts only"""
        return self.__class__._onion_auth

    @onion_auth.setter
    def onion_auth(self, authTuple):
        """Set proxy authentication for onion hosts only. Untested."""
        self.__class__._onion_auth = authTuple

    def __init__(self, address):
        if not isinstance(address, Peer):
            raise ValueError
        AdvancedDispatcher.__init__(self)
        self.destination = address
        self.isOutbound = True
        self.fullyEstablished = False
        self.connectedAt = 0
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        if BMConfigParser().safeGetBoolean(
                "bitmessagesettings", "socksauthentication"):
            self.auth = (
                BMConfigParser().safeGet(
                    "bitmessagesettings", "socksusername"),
                BMConfigParser().safeGet(
                    "bitmessagesettings", "sockspassword"))
        else:
            self.auth = None
        self.connect(
            self.onion_proxy
            if address.host.endswith(".onion") and self.onion_proxy else
            self.proxy
        )

    def handle_connect(self):
        """Handle connection event (to the proxy)"""
        self.set_state("init")
        try:
            AdvancedDispatcher.handle_connect(self)
        except socket.error as e:
            if e.errno in asyncore._DISCONNECTED:
                logger.debug(
                    "%s:%i: Connection failed: %s",
                    self.destination.host, self.destination.port, e)
                return
        self.state_init()

    def state_proxy_handshake_done(self):
        """Handshake is complete at this point"""
        # pylint: disable=attribute-defined-outside-init
        self.connectedAt = time.time()
        return False
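The proxy settings live on the class, not on instances, so assigning through any one connection reconfigures all of them. A minimal sketch of that class-attribute property pattern, detached from the dispatcher machinery above:

class Proxy(object):  # sketch of the class-level setting shown above
    _proxy = ("127.0.0.1", 9050)

    @property
    def proxy(self):
        return self.__class__._proxy

    @proxy.setter
    def proxy(self, address):
        if (not isinstance(address, tuple) or len(address) < 2
                or not isinstance(address[0], str)
                or not isinstance(address[1], int)):
            raise ValueError
        self.__class__._proxy = address


a, b = Proxy(), Proxy()
a.proxy = ("127.0.0.1", 9150)   # e.g. switch to Tor Browser's SOCKS port
print(b.proxy)                  # ('127.0.0.1', 9150), shared by all instances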
@ -1,168 +0,0 @@
"""
A tracking dict with randomised key order
"""
import random
from threading import RLock
from time import time

import helper_random


class RandomTrackingDict(object):
    """
    Dict with randomised order and tracking.

    Keeps track of how many items have been requested from the dict,
    and timeouts. Resets after all objects have been retrieved and timed out.
    The main purpose of this isn't as much putting related code together
    as performance optimisation and anonymisation of downloading of objects
    from other peers. If done using a standard dict or array, it takes
    too much CPU (and looks convoluted). Randomisation helps with anonymity.
    """
    # pylint: disable=too-many-instance-attributes
    maxPending = 10
    pendingTimeout = 60

    def __init__(self):
        self.dictionary = {}
        self.indexDict = []
        self.len = 0
        self.pendingLen = 0
        self.lastPoll = 0
        self.lastObject = 0
        self.lock = RLock()

    def __len__(self):
        return self.len

    def __contains__(self, key):
        return key in self.dictionary

    def __getitem__(self, key):
        return self.dictionary[key][1]

    def _swap(self, i1, i2):
        with self.lock:
            key1 = self.indexDict[i1]
            key2 = self.indexDict[i2]
            self.indexDict[i1] = key2
            self.indexDict[i2] = key1
            self.dictionary[key1][0] = i2
            self.dictionary[key2][0] = i1
        # for quick reassignment
        return i2

    def __setitem__(self, key, value):
        with self.lock:
            if key in self.dictionary:
                self.dictionary[key][1] = value
            else:
                self.indexDict.append(key)
                self.dictionary[key] = [self.len, value]
                self._swap(self.len, self.len - self.pendingLen)
                self.len += 1

    def __delitem__(self, key):
        if key not in self.dictionary:
            raise KeyError
        with self.lock:
            index = self.dictionary[key][0]
            # not pending
            if index < self.len - self.pendingLen:
                # left of pending part
                index = self._swap(index, self.len - self.pendingLen - 1)
            # pending
            else:
                self.pendingLen -= 1
            # end
            self._swap(index, self.len - 1)
            # if the following del is batched, performance of this single
            # operation can improve 4x, but it's already very fast so we'll
            # ignore it for the time being
            del self.indexDict[-1]
            del self.dictionary[key]
            self.len -= 1

    def setMaxPending(self, maxPending):
        """
        Sets maximum number of objects that can be retrieved from the class
        simultaneously as long as there is no timeout
        """
        self.maxPending = maxPending

    def setPendingTimeout(self, pendingTimeout):
        """Sets how long to wait for a timeout if max pending is reached
        (or all objects have been retrieved)"""
        self.pendingTimeout = pendingTimeout

    def setLastObject(self):
        """Update timestamp for tracking of received objects"""
        self.lastObject = time()

    def randomKeys(self, count=1):
        """Retrieve count random keys from the dict
        that haven't already been retrieved"""
        if self.len == 0 or ((self.pendingLen >= self.maxPending or
                              self.pendingLen == self.len) and self.lastPoll +
                             self.pendingTimeout > time()):
            raise KeyError

        # pylint: disable=redefined-outer-name
        with self.lock:
            # reset if we've requested all
            # and if last object received too long time ago
            if self.pendingLen == self.len and self.lastObject + \
                    self.pendingTimeout < time():
                self.pendingLen = 0
                self.setLastObject()
            available = self.len - self.pendingLen
            if count > available:
                count = available
            randomIndex = helper_random.randomsample(
                range(self.len - self.pendingLen), count)
            retval = [self.indexDict[i] for i in randomIndex]

            for i in sorted(randomIndex, reverse=True):
                # swap with one below lowest pending
                self._swap(i, self.len - self.pendingLen - 1)
                self.pendingLen += 1
            self.lastPoll = time()
            return retval


if __name__ == '__main__':

    # pylint: disable=redefined-outer-name
    def randString():
        """helper function for tests, generates a random byte string"""
        retval = b''
        for _ in range(32):
            retval += bytes([random.randint(0, 255)])
        return retval

    a = []
    k = RandomTrackingDict()
    d = {}

    print("populating random tracking dict")
    a.append(time())
    for _ in range(50000):
        k[randString()] = True
    a.append(time())
    print("done")

    while k:
        retval = k.randomKeys(1000)
        if not retval:
            print("error getting random keys")
        try:
            k.randomKeys(100)
            print("bad")
        except KeyError:
            pass
        for i in retval:
            del k[i]
        a.append(time())

    for x in range(len(a) - 1):
        print("{}: {:.3f}".format(x, a[x + 1] - a[x]))
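The typical cycle is: announce hashes, pull a random batch with randomKeys() (which marks them pending), then delete each key as the object arrives. A hedged usage sketch, assuming the removed module above is importable under its old path:

from network.randomtrackingdict import RandomTrackingDict

tracker = RandomTrackingDict()
for i in range(100):
    tracker[bytes([i]) * 32] = True    # track 100 announced object hashes

wanted = tracker.randomKeys(10)        # 10 random keys, now marked pending
for key in wanted:                     # the objects arrive from the peer
    del tracker[key]                   # stop tracking each one
print(len(tracker))                    # 90 remain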
@ -1,56 +0,0 @@
"""
Process data incoming from network
"""
import errno
import queue as Queue
import socket

from pybitmessage import state
from pybitmessage.network.advanceddispatcher import UnknownStateError
from pybitmessage.network.connectionpool import BMConnectionPool
from pybitmessage.queues import receiveDataQueue
from pybitmessage.network.threads import StoppableThread


class ReceiveQueueThread(StoppableThread):
    """This thread processes data received from the network
    (which is done by the asyncore thread)"""
    def __init__(self, num=0):
        super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num)

    def run(self):
        while not self._stopped and state.shutdown == 0:
            try:
                dest = receiveDataQueue.get(block=True, timeout=1)
            except Queue.Empty:
                continue

            if self._stopped or state.shutdown:
                break

            # cycle as long as there is data;
            # state_* methods should return False if there isn't
            # enough data, or the connection is to be aborted

            try:
                connection = BMConnectionPool().getConnectionByAddr(dest)
            # connection object not found
            except KeyError:
                receiveDataQueue.task_done()
                continue
            try:
                connection.process()
            # state isn't implemented
            except UnknownStateError:
                pass
            except socket.error as err:
                if err.errno == errno.EBADF:
                    connection.set_state("close", 0)
                else:
                    self.logger.error('Socket error: %s', err)
            except:
                self.logger.error('Error processing', exc_info=True)
            receiveDataQueue.task_done()
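The asyncore thread only enqueues a connection's destination address; any number of ReceiveQueue workers then compete for items on the shared queue. A self-contained sketch of that fan-out, using plain queue/threading stand-ins rather than the project's queues:

import queue
import threading

receiveDataQueue = queue.Queue()  # stand-in for pybitmessage.queues


def worker(num):
    while True:
        dest = receiveDataQueue.get()
        if dest is None:                  # shutdown sentinel
            break
        print("worker %i processing %s" % (num, dest))
        receiveDataQueue.task_done()


threads = [threading.Thread(target=worker, args=(i,)) for i in range(2)]
for t in threads:
    t.start()
receiveDataQueue.put(("127.0.0.1", 8444))  # what the asyncore thread would do
receiveDataQueue.join()                    # wait until the item is processed
for _ in threads:
    receiveDataQueue.put(None)             # stop the workers
for t in threads:
    t.join()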
@ -1,143 +0,0 @@
"""
SOCKS4a proxy module
"""
# pylint: disable=attribute-defined-outside-init
import socket
import struct

from network.proxy import Proxy, ProxyError, GeneralProxyError


class Socks4aError(ProxyError):
    """SOCKS4a error base class"""
    errorCodes = (
        "Request granted",
        "Request rejected or failed",
        "Request rejected because SOCKS server cannot connect to identd"
        " on the client",
        "Request rejected because the client program and identd report"
        " different user-ids",
        "Unknown error"
    )


class Socks4a(Proxy):
    """SOCKS4a proxy class"""
    def __init__(self, address=None):
        Proxy.__init__(self, address)
        self.ipaddr = None
        self.destport = address[1]

    def state_init(self):
        """Protocol initialisation (before connection is established)"""
        self.set_state("auth_done", 0)
        return True

    def state_pre_connect(self):
        """Handle feedback from SOCKS4a while it is connecting on our behalf"""
        # Get the response
        if self.read_buf[0:1] != chr(0x00).encode():
            # bad data
            self.close()
            raise GeneralProxyError(1)
        elif self.read_buf[1:2] != chr(0x5A).encode():
            # Connection failed
            self.close()
            if ord(self.read_buf[1:2]) in (91, 92, 93):
                # socks 4 error
                raise Socks4aError(ord(self.read_buf[1:2]) - 90)
            else:
                raise Socks4aError(4)
        # Get the bound address/port
        self.boundport = struct.unpack(">H", self.read_buf[2:4])[0]
        self.boundaddr = self.read_buf[4:]
        self.__proxysockname = (self.boundaddr, self.boundport)
        if self.ipaddr:
            self.__proxypeername = (
                socket.inet_ntoa(self.ipaddr), self.destination[1])
        else:
            self.__proxypeername = (self.destination[0], self.destport)
        self.set_state("proxy_handshake_done", length=8)
        return True

    def proxy_sock_name(self):
        """
        Handle return value when using SOCKS4a for DNS resolving
        instead of connecting.
        """
        return socket.inet_ntoa(self.__proxysockname[0])


class Socks4aConnection(Socks4a):
    """Child SOCKS4a class used for making outbound connections."""
    def __init__(self, address):
        Socks4a.__init__(self, address=address)

    def state_auth_done(self):
        """Request connection to be made"""
        # Now we can request the actual connection
        rmtrslv = False
        self.append_write_buf(
            struct.pack('>BBH', 0x04, 0x01, self.destination[1]))
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            self.ipaddr = socket.inet_aton(self.destination[0])
            self.append_write_buf(self.ipaddr)
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self._remote_dns:
                # Resolve remotely
                rmtrslv = True
                self.ipaddr = None
                self.append_write_buf(
                    struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
            else:
                # Resolve locally
                self.ipaddr = socket.inet_aton(
                    socket.gethostbyname(self.destination[0]))
                self.append_write_buf(self.ipaddr)
        if self._auth:
            self.append_write_buf(self._auth[0])
        self.append_write_buf(chr(0x00).encode())
        if rmtrslv:
            # the hostname must be sent as bytes, NUL-terminated
            self.append_write_buf(
                self.destination[0].encode() + chr(0x00).encode())
        self.set_state("pre_connect", length=0, expectBytes=8)
        return True

    def state_pre_connect(self):
        """Tell SOCKS4a to initiate a connection"""
        try:
            return Socks4a.state_pre_connect(self)
        except Socks4aError as e:
            self.close_reason = e.message
            self.set_state("close")


class Socks4aResolver(Socks4a):
    """DNS resolver class using SOCKS4a"""
    def __init__(self, host):
        self.host = host
        self.port = 8444
        Socks4a.__init__(self, address=(self.host, self.port))

    def state_auth_done(self):
        """Request connection to be made"""
        # Now we can request the actual connection
        self.append_write_buf(
            struct.pack('>BBH', 0x04, 0xF0, self.destination[1]))
        self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
        if self._auth:
            self.append_write_buf(self._auth[0])
        self.append_write_buf(chr(0x00).encode())
        # the hostname must be sent as bytes, NUL-terminated
        self.append_write_buf(self.host.encode() + chr(0x00).encode())
        self.set_state("pre_connect", length=0, expectBytes=8)
        return True

    def resolved(self):
        """
        Resolving is done, process the return value. To use this within
        PyBitmessage, a callback needs to be implemented which hasn't
        been done yet.
        """
        print("Resolved {} as {}".format(self.host, self.proxy_sock_name()))
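For a remotely resolved hostname, state_auth_done() assembles the classic SOCKS4a CONNECT request: version and command, destination port, the sentinel address 0.0.0.1 that tells the proxy to resolve the name itself, a NUL-terminated userid, then the NUL-terminated hostname. A standalone sketch of those bytes (sample host and port, not values from the code above):

import struct

host, port, userid = b'example.onion', 8444, b''
request = struct.pack('>BBH', 0x04, 0x01, port)          # VN=4, CD=1 (CONNECT)
request += struct.pack('BBBB', 0x00, 0x00, 0x00, 0x01)   # 0.0.0.1: resolve remotely
request += userid + b'\x00'                              # userid, NUL-terminated
request += host + b'\x00'                                # hostname, NUL-terminated
print(request.hex())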
@ -1,222 +0,0 @@
"""
SOCKS5 proxy module
"""
# pylint: disable=attribute-defined-outside-init

import socket
import struct

from network.proxy import GeneralProxyError, Proxy, ProxyError

from .node import Peer


class Socks5AuthError(ProxyError):
    """Raised when the socks5 protocol encounters an authentication error"""
    errorCodes = (
        "Succeeded",
        "Authentication is required",
        "All offered authentication methods were rejected",
        "Unknown username or invalid password",
        "Unknown error"
    )


class Socks5Error(ProxyError):
    """Raised when the socks5 protocol encounters an error"""
    errorCodes = (
        "Succeeded",
        "General SOCKS server failure",
        "Connection not allowed by ruleset",
        "Network unreachable",
        "Host unreachable",
        "Connection refused",
        "TTL expired",
        "Command not supported",
        "Address type not supported",
        "Unknown error"
    )


class Socks5(Proxy):
    """A socks5 proxy base class"""
    def __init__(self, address=None):
        Proxy.__init__(self, address)
        self.ipaddr = None
        self.destport = address[1]

    def state_init(self):
        """Protocol initialization (before connection is established)"""
        if self._auth:
            self.append_write_buf(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00))
        self.set_state("auth_1", length=0, expectBytes=2)
        return True

    def state_auth_1(self):
        """Perform authentication if peer is requesting it."""
        ret = struct.unpack('BB', self.read_buf[:2])
        if ret[0] != 5:
            # general error
            raise GeneralProxyError(1)
        elif ret[1] == 0:
            # no auth required
            self.set_state("auth_done", length=2)
        elif ret[1] == 2:
            # username/password
            self.append_write_buf(
                struct.pack(
                    'BB', 1, len(self._auth[0])) + self._auth[0] + struct.pack(
                        'B', len(self._auth[1])) + self._auth[1])
            self.set_state("auth_needed", length=2, expectBytes=2)
        else:
            if ret[1] == 0xff:
                # auth error
                raise Socks5AuthError(2)
            else:
                # other error
                raise GeneralProxyError(1)
        return True

    def state_auth_needed(self):
        """Handle response to authentication attempt"""
        ret = struct.unpack('BB', self.read_buf[0:2])
        if ret[0] != 1:
            # general error
            raise GeneralProxyError(1)
        if ret[1] != 0:
            # auth error
            raise Socks5AuthError(3)
        # all ok
        self.set_state("auth_done", length=2)
        return True

    def state_pre_connect(self):
        """Handle feedback from socks5 while it is connecting on our behalf."""
        # Get the response
        if self.read_buf[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError(1)
        elif self.read_buf[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(self.read_buf[1:2]) <= 8:
                raise Socks5Error(ord(self.read_buf[1:2]))
            else:
                raise Socks5Error(9)
        # Get the bound address/port
        elif self.read_buf[3:4] == chr(0x01).encode():
            self.set_state("proxy_addr_1", length=4, expectBytes=4)
        elif self.read_buf[3:4] == chr(0x03).encode():
            self.set_state("proxy_addr_2_1", length=4, expectBytes=1)
        else:
            self.close()
            raise GeneralProxyError(1)
        return True

    def state_proxy_addr_1(self):
        """Handle IPv4 address returned for peer"""
        self.boundaddr = self.read_buf[0:4]
        self.set_state("proxy_port", length=4, expectBytes=2)
        return True

    def state_proxy_addr_2_1(self):
        """
        Handle other addresses than IPv4 returned for peer
        (e.g. IPv6, onion, ...). This is part 1 which retrieves the
        length of the data.
        """
        self.address_length = ord(self.read_buf[0:1])
        self.set_state(
            "proxy_addr_2_2", length=1, expectBytes=self.address_length)
        return True

    def state_proxy_addr_2_2(self):
        """
        Handle other addresses than IPv4 returned for peer
        (e.g. IPv6, onion, ...). This is part 2 which retrieves the data.
        """
        self.boundaddr = self.read_buf[0:self.address_length]
        self.set_state("proxy_port", length=self.address_length, expectBytes=2)
        return True

    def state_proxy_port(self):
        """Handle peer's port being returned."""
        self.boundport = struct.unpack(">H", self.read_buf[0:2])[0]
        self.__proxysockname = (self.boundaddr, self.boundport)
        if self.ipaddr is not None:
            self.__proxypeername = (
                socket.inet_ntoa(self.ipaddr), self.destination[1])
        else:
            self.__proxypeername = (self.destination[0], self.destport)
        self.set_state("proxy_handshake_done", length=2)
        return True

    def proxy_sock_name(self):
        """Handle return value when using SOCKS5
        for DNS resolving instead of connecting."""
        return socket.inet_ntoa(self.__proxysockname[0])


class Socks5Connection(Socks5):
    """Child socks5 class used for making outbound connections."""
    def state_auth_done(self):
        """Request connection to be made"""
        # Now we can request the actual connection
        self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00))
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            self.ipaddr = socket.inet_aton(self.destination[0])
            self.append_write_buf(chr(0x01).encode() + self.ipaddr)
        except socket.error:  # may be IPv6!
            # Well it's not an IP number, so it's probably a DNS name.
            if self._remote_dns:
                # Resolve remotely
                self.ipaddr = None
                # domain name must go out as length-prefixed bytes
                self.append_write_buf(chr(0x03).encode() + chr(
                    len(self.destination[0])).encode()
                    + self.destination[0].encode())
            else:
                # Resolve locally
                self.ipaddr = socket.inet_aton(
                    socket.gethostbyname(self.destination[0]))
                self.append_write_buf(chr(0x01).encode() + self.ipaddr)
        self.append_write_buf(struct.pack(">H", self.destination[1]))
        self.set_state("pre_connect", length=0, expectBytes=4)
        return True

    def state_pre_connect(self):
        """Tell socks5 to initiate a connection"""
        try:
            return Socks5.state_pre_connect(self)
        except Socks5Error as e:
            self.close_reason = e.message
            self.set_state("close")


class Socks5Resolver(Socks5):
    """DNS resolver class using socks5"""
    def __init__(self, host):
        self.host = host
        self.port = 8444
        Socks5.__init__(self, address=Peer(self.host, self.port))

    def state_auth_done(self):
        """Perform resolving"""
        # Now we can request the actual connection
        self.append_write_buf(struct.pack('BBB', 0x05, 0xF0, 0x00))
        # hostname must go out as length-prefixed bytes
        self.append_write_buf(chr(0x03).encode() + chr(
            len(self.host)).encode() + self.host.encode())
        self.append_write_buf(struct.pack(">H", self.port))
        self.set_state("pre_connect", length=0, expectBytes=4)
        return True

    def resolved(self):
        """
        Resolving is done, process the return value.
        To use this within PyBitmessage, a callback needs to be
        implemented which hasn't been done yet.
        """
        print("Resolved {} as {}".format(self.host, self.proxy_sock_name()))
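The SOCKS5 exchange opens with a method-selection round trip: the client offers authentication methods, the server picks one. A standalone sketch of the greeting that state_init() sends and the reply decoding that state_auth_1() performs (sample reply bytes, not live traffic):

import struct

greeting = struct.pack('BBB', 0x05, 0x01, 0x00)   # version 5, 1 method: no-auth
reply = b'\x05\x00'                               # a no-auth server's answer

ver, method = struct.unpack('BB', reply)
if ver != 5:
    raise ValueError('not a SOCKS5 server')
if method == 0x00:
    print('no authentication required')
elif method == 0xff:
    raise ValueError('all offered auth methods rejected')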
@ -1,71 +0,0 @@
"""
Network statistics
"""
import time

from network import asyncore_pollchoose as asyncore
from network.connectionpool import BMConnectionPool
from network.objectracker import missingObjects


lastReceivedTimestamp = time.time()
lastReceivedBytes = 0
currentReceivedSpeed = 0
lastSentTimestamp = time.time()
lastSentBytes = 0
currentSentSpeed = 0


def connectedHostsList():
    """List of all the connected hosts"""
    return BMConnectionPool().establishedConnections()


def sentBytes():
    """Total bytes sent"""
    return asyncore.sentBytes


def uploadSpeed():
    """Return the upload speed in bytes per second"""
    # pylint: disable=global-statement
    global lastSentTimestamp, lastSentBytes, currentSentSpeed
    currentTimestamp = time.time()
    if int(lastSentTimestamp) < int(currentTimestamp):
        currentSentBytes = asyncore.sentBytes
        currentSentSpeed = int(
            (currentSentBytes - lastSentBytes) / (
                currentTimestamp - lastSentTimestamp))
        lastSentBytes = currentSentBytes
        lastSentTimestamp = currentTimestamp
    return currentSentSpeed


def receivedBytes():
    """Total bytes received"""
    return asyncore.receivedBytes


def downloadSpeed():
    """Return the download speed in bytes per second"""
    # pylint: disable=global-statement
    global lastReceivedTimestamp, lastReceivedBytes, currentReceivedSpeed
    currentTimestamp = time.time()
    if int(lastReceivedTimestamp) < int(currentTimestamp):
        currentReceivedBytes = asyncore.receivedBytes
        currentReceivedSpeed = int(
            (currentReceivedBytes - lastReceivedBytes) / (
                currentTimestamp - lastReceivedTimestamp))
        lastReceivedBytes = currentReceivedBytes
        lastReceivedTimestamp = currentTimestamp
    return currentReceivedSpeed


def pendingDownload():
    """Return the number of objects waiting to be downloaded"""
    return len(missingObjects)


def pendingUpload():
    """Return the number of pending uploads (not tracked, always 0)"""
    return 0
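Both speed functions are the same delta-over-time computation, resampled at most once per second because the timestamps are compared as whole seconds. A standalone sketch with stand-in counters instead of the asyncore globals:

import time

lastSentTimestamp = time.time() - 2.0   # pretend the last sample was 2 s ago
lastSentBytes = 1000
sentBytesNow = 5000                     # stand-in for asyncore.sentBytes

now = time.time()
if int(lastSentTimestamp) < int(now):   # at most one sample per second
    speed = int((sentBytesNow - lastSentBytes) / (now - lastSentTimestamp))
    lastSentBytes, lastSentTimestamp = sentBytesNow, now
    print("upload speed: %i B/s" % speed)   # roughly 2000 B/s here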
@ -1,433 +0,0 @@
"""
TCP protocol handler
"""
# pylint: disable=too-many-ancestors, protected-access
import logging
import math
import random
import socket
import time

import addresses
import network.asyncore_pollchoose as asyncore
from network import connectionpool
import helper_random
import knownnodes
import protocol
import state
from bmconfigparser import BMConfigParser
from helper_random import randomBytes
from inventory import Inventory
from network.advanceddispatcher import AdvancedDispatcher
from network.assemble import assemble_addr
from network.bmproto import BMProto
from network.constants import MAX_OBJECT_COUNT
from network.dandelion import Dandelion
from network.objectracker import ObjectTracker
from network.socks4a import Socks4aConnection
from network.socks5 import Socks5Connection
from network.tls import TLSDispatcher
from .node import Peer
from queues import UISignalQueue, invQueue, receiveDataQueue

logger = logging.getLogger('default')


maximumAgeOfNodesThatIAdvertiseToOthers = 10800  #: Equals three hours


class TCPConnection(BMProto, TLSDispatcher):
    # pylint: disable=too-many-instance-attributes
    """
    .. todo:: Look to understand and/or fix the non-parent-init-called
    """

    def __init__(self, address=None, sock=None):
        BMProto.__init__(self, address=address, sock=sock)
        self.verackReceived = False
        self.verackSent = False
        self.streams = [0]
        self.fullyEstablished = False
        self.connectedAt = 0
        self.skipUntil = 0
        if address is None and sock is not None:
            self.destination = Peer(*sock.getpeername())
            self.isOutbound = False
            TLSDispatcher.__init__(self, sock, server_side=True)
            self.connectedAt = time.time()
            logger.debug(
                'Received connection from %s:%i',
                self.destination.host, self.destination.port)
            self.nodeid = randomBytes(8)
        elif address is not None and sock is not None:
            TLSDispatcher.__init__(self, sock, server_side=False)
            self.isOutbound = True
            logger.debug(
                'Outbound proxy connection to %s:%i',
                self.destination.host, self.destination.port)
        else:
            self.destination = address
            self.isOutbound = True
            try:
                self.create_socket(
                    socket.AF_INET6 if ":" in address.host else socket.AF_INET,
                    socket.SOCK_STREAM)
            except TypeError:
                self.create_socket(
                    socket.AF_INET6 if b':' in address.host else socket.AF_INET,
                    socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            TLSDispatcher.__init__(self, sock, server_side=False)
            self.connect(self.destination)
            logger.debug(
                'Connecting to %s:%i',
                self.destination.host, self.destination.port)
        try:
            self.local = (
                protocol.checkIPAddress(
                    protocol.encodeHost(self.destination.host), True) and
                not protocol.checkSocksIP(self.destination.host)
            )
        except socket.error:
            # it's probably a hostname
            pass
        self.network_group = protocol.network_group(self.destination.host)
        ObjectTracker.__init__(self)  # pylint: disable=non-parent-init-called
        self.bm_proto_reset()
        self.set_state("bm_header", expectBytes=protocol.Header.size)

    def antiIntersectionDelay(self, initial=False):
        """
        This is a defense against the so called intersection attacks.

        It is called when you notice peer is requesting non-existing
        objects, or right after the connection is established. It will
        estimate how long an object will take to propagate across the
        network, and skip processing "getdata" requests until then. This
        means an attacker only has one shot per IP to perform the attack.
        """
        # estimated time for a small object to propagate across the
        # whole network
        max_known_nodes = max(
            len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes)
        delay = math.ceil(math.log(max_known_nodes + 2, 20)) * (
            0.2 + invQueue.queueCount / 2.0)
        # take the stream with maximum amount of nodes
        # +2 is to avoid problems with log(0) and log(1)
        # 20 is avg connected nodes count
        # 0.2 is avg message transmission time
        if delay > 0:
            if initial:
                self.skipUntil = self.connectedAt + delay
                if self.skipUntil > time.time():
                    logger.debug(
                        'Initial skipping processing getdata for %.2fs',
                        self.skipUntil - time.time())
            else:
                logger.debug(
                    'Skipping processing getdata due to missing object'
                    ' for %.2fs', delay)
                self.skipUntil = time.time() + delay

    def state_connection_fully_established(self):
        """
        State after the bitmessage protocol handshake is completed
        (version/verack exchange, and if both sides support TLS,
        the TLS handshake as well).
        """
        self.set_connection_fully_established()
        self.set_state("bm_header")
        self.bm_proto_reset()
        return True

    def set_connection_fully_established(self):
        """Initiate inventory synchronisation."""
        if not self.isOutbound and not self.local:
            state.clientHasReceivedIncomingConnections = True
            UISignalQueue.put(('setStatusIcon', 'green'))
        UISignalQueue.put(
            ('updateNetworkStatusTab', (
                self.isOutbound, True, self.destination)))
        self.antiIntersectionDelay(True)
        self.fullyEstablished = True
        if self.isOutbound:
            knownnodes.increaseRating(self.destination)
            Dandelion().maybeAddStem(self)
        self.sendAddr()
        self.sendBigInv()

    def sendAddr(self):
        """Send a partial list of known addresses to peer."""
        # We are going to share a maximum number of 1000 addrs (per overlapping
        # stream) with our peer. 500 from overlapping streams, 250 from the
        # left child stream, and 250 from the right child stream.
        maxAddrCount = BMConfigParser().safeGetInt(
            "bitmessagesettings", "maxaddrperstreamsend", 500)

        templist = []
        addrs = {}
        for stream in self.streams:
            with knownnodes.knownNodesLock:
                for nitro, sitro in enumerate((stream, stream * 2, stream * 2 + 1)):
                    nodes = knownnodes.knownNodes.get(sitro)
                    if not nodes:
                        continue
                    # only if more recent than 3 hours
                    # and having positive or neutral rating
                    filtered = [
                        (k, v) for k, v in nodes.items()
                        if v["lastseen"] > int(time.time()) -
                        maximumAgeOfNodesThatIAdvertiseToOthers and
                        v["rating"] >= 0 and len(k.host) <= 22
                    ]
                    # send only 250 if the remote isn't interested in it
                    elemCount = min(
                        len(filtered),
                        maxAddrCount // 2 if nitro else maxAddrCount)
                    addrs[sitro] = helper_random.randomsample(filtered, elemCount)
        for substream in addrs:
            for peer, params in addrs[substream]:
                templist.append((substream, peer, params["lastseen"]))
        if templist:
            self.append_write_buf(assemble_addr(templist))

    def sendBigInv(self):
        """
        Send hashes of all inventory objects, chunked as the protocol has
        a per-command limit.
        """
        def sendChunk():
            """Send one chunk of inv entries in one command"""
            if objectCount == 0:
                return
            logger.debug(
                'Sending huge inv message with %i objects to just this'
                ' one peer', objectCount)
            self.append_write_buf(protocol.CreatePacket(
                'inv', addresses.encodeVarint(objectCount) + payload))

        # Select all hashes for objects in this stream.
        bigInvList = {}
        for stream in self.streams:
            # may lock for a long time, but I think it's better than
            # thousands of small locks
            with self.objectsNewToThemLock:
                for objHash in Inventory().unexpired_hashes_by_stream(stream):
                    # don't advertise stem objects on bigInv
                    if Dandelion().hasHash(objHash):
                        continue
                    bigInvList[objHash] = 0
        objectCount = 0
        payload = b''
        # Now let us start appending all of these hashes together. They will be
        # sent out in a big inv message to our new peer.

        for obj_hash in bigInvList:
            payload += obj_hash
            objectCount += 1

            # Remove -1 below when sufficient time has passed for users to
            # upgrade to versions of PyBitmessage that accept inv with 50,000
            # items
            if objectCount >= MAX_OBJECT_COUNT - 1:
                sendChunk()
                payload = b''
                objectCount = 0

        # flush
        sendChunk()

    def handle_connect(self):
        """Callback for TCP connection being established."""
        try:
            AdvancedDispatcher.handle_connect(self)
        except socket.error as e:
            # pylint: disable=protected-access
            if e.errno in asyncore._DISCONNECTED:
                logger.debug(
                    '%s:%i: Connection failed: %s',
                    self.destination.host, self.destination.port, e)
                return
        self.nodeid = randomBytes(8)
        self.append_write_buf(
            protocol.assembleVersionMessage(
                self.destination.host, self.destination.port,
                connectionpool.BMConnectionPool().streams,
                False, nodeid=self.nodeid))
        self.connectedAt = time.time()
        receiveDataQueue.put(self.destination)

    def handle_read(self):
        """Callback for reading from a socket"""
        TLSDispatcher.handle_read(self)
        if self.isOutbound and self.fullyEstablished:
            for s in self.streams:
                try:
                    with knownnodes.knownNodesLock:
                        knownnodes.knownNodes[s][self.destination][
                            "lastseen"] = time.time()
                except KeyError:
                    pass
        receiveDataQueue.put(self.destination)

    def handle_write(self):
        """Callback for writing to a socket"""
        TLSDispatcher.handle_write(self)

    def handle_close(self):
        """Callback for connection being closed."""
        if self.isOutbound and not self.fullyEstablished:
            knownnodes.decreaseRating(self.destination)
        if self.fullyEstablished:
            UISignalQueue.put((
                'updateNetworkStatusTab',
                (self.isOutbound, False, self.destination)
            ))
            if self.isOutbound:
                Dandelion().maybeRemoveStem(self)
        BMProto.handle_close(self)


class Socks5BMConnection(Socks5Connection, TCPConnection):
    """SOCKS5 wrapper for TCP connections"""

    def __init__(self, address):
        Socks5Connection.__init__(self, address=address)
        TCPConnection.__init__(self, address=address, sock=self.socket)
        self.set_state("init")

    def state_proxy_handshake_done(self):
        """
        State when SOCKS5 connection succeeds, we need to send a
        Bitmessage handshake to peer.
        """
        Socks5Connection.state_proxy_handshake_done(self)
        self.nodeid = randomBytes(8)
        self.append_write_buf(
            protocol.assembleVersionMessage(
                self.destination.host, self.destination.port,
                connectionpool.BMConnectionPool().streams,
                False, nodeid=self.nodeid))
        self.set_state("bm_header", expectBytes=protocol.Header.size)
        return True


class Socks4aBMConnection(Socks4aConnection, TCPConnection):
    """SOCKS4a wrapper for TCP connections"""

    def __init__(self, address):
        Socks4aConnection.__init__(self, address=address)
        TCPConnection.__init__(self, address=address, sock=self.socket)
        self.set_state("init")

    def state_proxy_handshake_done(self):
        """
        State when SOCKS4a connection succeeds, we need to send a
        Bitmessage handshake to peer.
        """
        Socks4aConnection.state_proxy_handshake_done(self)
        self.nodeid = randomBytes(8)
        self.append_write_buf(
            protocol.assembleVersionMessage(
                self.destination.host, self.destination.port,
                connectionpool.BMConnectionPool().streams,
                False, nodeid=self.nodeid))
        self.set_state("bm_header", expectBytes=protocol.Header.size)
        return True


def bootstrap(connection_class):
    """Make bootstrapper class for connection type (connection_class)"""
    class Bootstrapper(connection_class):
        """Base class for bootstrappers"""
        _connection_base = connection_class

        def __init__(self, host, port):
            self._connection_base.__init__(self, Peer(host, port))
            self.close_reason = self._succeed = False

        def bm_command_addr(self):
            """
            Got addr message - the bootstrap succeeded.
            Let BMProto process the addr message and switch state to 'close'
            """
            BMProto.bm_command_addr(self)
            self._succeed = True
            # pylint: disable=attribute-defined-outside-init
            self.close_reason = "Thanks for bootstrapping!"
            self.set_state("close")

        def handle_close(self):
            """
            After closing the connection switch knownnodes.knownNodesActual
            back to False if the bootstrapper failed.
            """
            self._connection_base.handle_close(self)
            if not self._succeed:
                knownnodes.knownNodesActual = False

    return Bootstrapper


class TCPServer(AdvancedDispatcher):
    """TCP connection server for Bitmessage protocol"""

    def __init__(self, host='127.0.0.1', port=8444):
        if '_map' not in dir(self):
            AdvancedDispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        for attempt in range(50):
            try:
                if attempt > 0:
                    logger.warning('Failed to bind on port %s', port)
                    port = random.randint(32767, 65535)
                self.bind((host, port))
            except socket.error as e:
                if e.errno in (asyncore.EADDRINUSE, asyncore.WSAEADDRINUSE):
                    continue
            else:
                if attempt > 0:
                    logger.warning('Setting port to %s', port)
                    BMConfigParser().set(
                        'bitmessagesettings', 'port', str(port))
                    BMConfigParser().save()
                break
        self.destination = Peer(host, port)
        self.bound = True
        self.listen(5)

    def is_bound(self):
        """Is the socket bound?"""
        try:
            return self.bound
        except AttributeError:
            return False

    def handle_accept(self):
        """Incoming connection callback"""
        try:
            sock = self.accept()[0]
        except (TypeError, IndexError):
            return

        state.ownAddresses[Peer(*sock.getsockname())] = True
        if (
            len(connectionpool.BMConnectionPool().inboundConnections) +
            len(connectionpool.BMConnectionPool().outboundConnections) >
            BMConfigParser().safeGetInt(
                'bitmessagesettings', 'maxtotalconnections') +
            BMConfigParser().safeGetInt(
                'bitmessagesettings', 'maxbootstrapconnections') + 10
        ):
            # 10 is a sort of buffer, in between it will go through
            # the version handshake and return an error to the peer
            logger.warning("Server full, dropping connection")
            sock.close()
            return
        try:
            connectionpool.BMConnectionPool().addConnection(
                TCPConnection(sock=sock))
        except socket.error:
            pass
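bootstrap() is a class factory: the returned Bootstrapper inherits from whichever connection type is passed in, so the same bootstrap logic can run over plain TCP or over a SOCKS proxy. A hypothetical driver, assuming the module layout above (the hostname is illustrative):

from network.tcp import bootstrap, TCPConnection, Socks5BMConnection

# pick the transport at runtime, then bootstrap through it
conn_class = bootstrap(TCPConnection)   # or bootstrap(Socks5BMConnection)
connection = conn_class('bootstrap8444.bitmessage.org', 8444)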
@ -1,241 +0,0 @@
"""
SSL/TLS negotiation.
"""
import logging
import os
import socket
import ssl
import sys

import network.asyncore_pollchoose as asyncore
import paths
from network.advanceddispatcher import AdvancedDispatcher
from queues import receiveDataQueue

logger = logging.getLogger('default')

_DISCONNECTED_SSL = frozenset((ssl.SSL_ERROR_EOF,))

# sslProtocolVersion
if sys.version_info >= (2, 7, 13):
    # this means TLSv1 or higher
    # in the future change to
    # ssl.PROTOCOL_TLS1.2
    # some interpreter builds (seen e.g. on Python 3.5.2) lack
    # ssl.PROTOCOL_TLS, hence the try/except fallback below
    try:
        sslProtocolVersion = ssl.PROTOCOL_TLS  # pylint: disable=no-member
    except AttributeError:
        sslProtocolVersion = ssl.PROTOCOL_SSLv23
elif sys.version_info >= (2, 7, 9):
    # this means any SSL/TLS.
    # SSLv2 and 3 are excluded with an option after context is created
    sslProtocolVersion = ssl.PROTOCOL_SSLv23
else:
    # this means TLSv1, there is no way to set "TLSv1 or higher" or
    # "TLSv1.2" in < 2.7.9
    sslProtocolVersion = ssl.PROTOCOL_TLSv1


# ciphers
if ssl.OPENSSL_VERSION_NUMBER >= 0x10100000 and not \
        ssl.OPENSSL_VERSION.startswith("LibreSSL"):
    sslProtocolCiphers = "AECDH-AES256-SHA@SECLEVEL=0"
else:
    sslProtocolCiphers = "AECDH-AES256-SHA"


class TLSDispatcher(AdvancedDispatcher):
    """TLS functionality for classes derived from AdvancedDispatcher"""
    # pylint: disable=too-many-instance-attributes, too-many-arguments
    # pylint: disable=super-init-not-called
    def __init__(self, _=None, sock=None, certfile=None, keyfile=None,
                 server_side=False, ciphers=sslProtocolCiphers):
        self.want_read = self.want_write = True
        if certfile is None:
            self.certfile = os.path.join(
                paths.codePath(), 'sslkeys', 'cert.pem')
        else:
            self.certfile = certfile
        if keyfile is None:
            self.keyfile = os.path.join(
                paths.codePath(), 'sslkeys', 'key.pem')
        else:
            self.keyfile = keyfile
        self.server_side = server_side
        self.ciphers = ciphers
        self.tlsStarted = False
        self.tlsDone = False
        self.tlsVersion = "N/A"
        self.isSSL = False
        self.sslSocket = None

    def state_tls_init(self):
        """Prepare sockets for TLS handshake"""
        # pylint: disable=attribute-defined-outside-init
        self.isSSL = True
        self.tlsStarted = True
        # Once the connection has been established,
        # it's safe to wrap the socket.
        self.want_read = self.want_write = True
        self.set_state("tls_handshake")
        return False
        # if hasattr(self.socket, "context"):
        #     self.socket.context.set_ecdh_curve("secp256k1")

    @staticmethod
    def state_tls_handshake():
        """
        Do nothing while TLS handshake is pending, as during this phase
        we need to react to callbacks instead
        """
        return False

    def writable(self):
        """Handle writable checks for TLS-enabled sockets"""
        try:
            if self.tlsStarted and not self.tlsDone and not self.write_buf:
                return self.want_write
            return AdvancedDispatcher.writable(self)
        except AttributeError:
            return AdvancedDispatcher.writable(self)

    def readable(self):
        """Handle readable check for TLS-enabled sockets"""
        try:
            # during TLS handshake, and after flushing write buffer,
            # return status of last handshake attempt
            if self.tlsStarted and not self.tlsDone and not self.write_buf:
                return self.want_read
            # prior to TLS handshake,
            # receiveDataThread should emulate synchronous behaviour
            elif not self.fullyEstablished and (
                    self.expectBytes == 0 or not self.write_buf_empty()):
                return False
            return AdvancedDispatcher.readable(self)
        except AttributeError:
            return AdvancedDispatcher.readable(self)

    def handle_read(self):  # pylint: disable=inconsistent-return-statements
        """
        Handle reads for sockets during TLS handshake. Requires special
        treatment as during the handshake, buffers must remain empty
        and normal reads must be ignored.
        """
        try:
            if self.tlsStarted and not self.tlsDone and not self.write_buf:
                # logger.debug(
                #     "%s:%i TLS handshaking (read)", self.destination.host,
                #     self.destination.port)
                self.tls_handshake()
            else:
                # logger.debug(
                #     "%s:%i Not TLS handshaking (read)", self.destination.host,
                #     self.destination.port)
                return AdvancedDispatcher.handle_read(self)
        except AttributeError:
            return AdvancedDispatcher.handle_read(self)
        except ssl.SSLError as err:
            if err.errno == ssl.SSL_ERROR_WANT_READ:
                return
            elif err.errno in _DISCONNECTED_SSL:
                self.handle_close()
                return
            logger.info("SSL Error: %s", str(err))
            self.handle_close()
            return

    def handle_write(self):  # pylint: disable=inconsistent-return-statements
        """
        Handle writes for sockets during TLS handshake. Requires special
        treatment as during the handshake, buffers must remain empty
        and normal writes must be ignored.
        """
        try:
            # wait for write buffer flush
            if self.tlsStarted and not self.tlsDone and not self.write_buf:
                # logger.debug(
                #     "%s:%i TLS handshaking (write)", self.destination.host,
                #     self.destination.port)
                self.tls_handshake()
            else:
                return AdvancedDispatcher.handle_write(self)
        except AttributeError:
            return AdvancedDispatcher.handle_write(self)
        except ssl.SSLError as err:
            if err.errno == ssl.SSL_ERROR_WANT_WRITE:
                return 0
            elif err.errno in _DISCONNECTED_SSL:
                self.handle_close()
                return 0
            logger.info("SSL Error: %s", str(err))
            self.handle_close()
            return

    def tls_handshake(self):  # pylint:disable=too-many-branches
        """Perform TLS handshake and handle its stages"""
        # wait for flush
        # self.sslSocket.setblocking(0)
        if self.write_buf:
            return False
        if not self.sslSocket:
            self.del_channel()
            if sys.version_info >= (2, 7, 9):
                context = ssl.create_default_context(
                    purpose=ssl.Purpose.SERVER_AUTH
                    if self.server_side else ssl.Purpose.CLIENT_AUTH)
                context.set_ciphers(self.ciphers)
                context.set_ecdh_curve("secp256k1")
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
                # also exclude TLSv1 and TLSv1.1 in the future
|
||||
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 |\
|
||||
ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE |\
|
||||
ssl.OP_CIPHER_SERVER_PREFERENCE
|
||||
self.sslSocket = context.wrap_socket(
|
||||
self.socket, server_side=self.server_side,
|
||||
do_handshake_on_connect=False)
|
||||
else:
|
||||
self.sslSocket = ssl.wrap_socket(
|
||||
self.socket, server_side=self.server_side,
|
||||
ssl_version=sslProtocolVersion,
|
||||
certfile=self.certfile, keyfile=self.keyfile,
|
||||
ciphers=self.ciphers, do_handshake_on_connect=False)
|
||||
self.sslSocket.setblocking(0)
|
||||
self.set_socket(self.sslSocket)
|
||||
# Perform the handshake.
|
||||
try:
|
||||
self.sslSocket.do_handshake()
|
||||
except ssl.SSLError as err:
|
||||
self.want_read = self.want_write = False
|
||||
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
|
||||
self.want_read = True
|
||||
if err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
|
||||
self.want_write = True
|
||||
if not (self.want_write or self.want_read):
|
||||
raise
|
||||
except socket.error as err:
|
||||
# pylint: disable=protected-access
|
||||
if err.errno in asyncore._DISCONNECTED:
|
||||
self.handle_close()
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
if sys.version_info >= (2, 7, 9):
|
||||
self.tlsVersion = self.sslSocket.version()
|
||||
logger.debug(
|
||||
'%s:%i: TLS handshake success, TLS protocol version: %s',
|
||||
self.destination.host, self.destination.port,
|
||||
self.tlsVersion)
|
||||
else:
|
||||
self.tlsVersion = "TLSv1"
|
||||
logger.debug(
|
||||
'%s:%i: TLS handshake success',
|
||||
self.destination.host, self.destination.port)
|
||||
# The handshake has completed, so remove this channel and...
|
||||
self.tlsDone = True
|
||||
|
||||
self.bm_proto_reset()
|
||||
self.set_state("connection_fully_established")
|
||||
receiveDataQueue.put(self.destination)
|
||||
return False
|
|
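
As a side note, the want_read/want_write bookkeeping above follows the standard non-blocking handshake pattern: retry do_handshake() until neither side is waiting. A minimal self-contained sketch of that pattern (the helper name is ours, not part of the removed class):

    import ssl

    def pump_handshake(sslsock):
        """Advance a non-blocking TLS handshake by one step.

        Returns (want_read, want_write); (False, False) means done.
        """
        try:
            sslsock.do_handshake()
        except ssl.SSLError as err:
            if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                return True, False   # wait for readability, then call again
            if err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                return False, True   # wait for writability, then call again
            raise
        return False, False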
@ -1,152 +0,0 @@
"""
UDP protocol handler
"""
import logging
import socket
import time

import protocol
from network.bmproto import BMProto
from network.objectracker import ObjectTracker
from .node import Peer
import state

from queues import receiveDataQueue

logger = logging.getLogger('default')
# pylint: disable=logging-format-interpolation


class UDPSocket(BMProto):  # pylint: disable=too-many-instance-attributes
    """Bitmessage protocol over UDP (class)"""
    port = 8444
    announceInterval = 60

    def __init__(self, host=None, sock=None, announcing=False):
        # pylint: disable=bad-super-call
        super(BMProto, self).__init__(sock=sock)
        self.verackReceived = True
        self.verackSent = True
        # .. todo:: sort out streams
        self.streams = [1]
        self.fullyEstablished = True
        self.connectedAt = 0
        self.skipUntil = 0
        if sock is None:
            if host is None:
                host = ''
            self.create_socket(
                socket.AF_INET6 if ":" in host else socket.AF_INET,
                socket.SOCK_DGRAM
            )
            self.set_socket_reuse()
            logger.info("Binding UDP socket to %s:%i", host, self.port)
            self.socket.bind((host, self.port))
        else:
            self.socket = sock
            self.set_socket_reuse()
        self.listening = Peer(*self.socket.getsockname())
        self.destination = Peer(*self.socket.getsockname())
        ObjectTracker.__init__(self)
        self.connecting = False
        self.connected = True
        self.announcing = announcing
        self.set_state("bm_header", expectBytes=protocol.Header.size)

    def set_socket_reuse(self):
        """Set socket reuse option"""
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except AttributeError:
            pass

    # disable most commands before doing research / testing
    # only addr (peer discovery), error and object are implemented

    def bm_command_getdata(self):
        return True

    def bm_command_inv(self):
        return True

    def bm_command_addr(self):
        addresses = self._decode_addr()
        # only allow peer discovery from private IPs in order to avoid
        # attacks from random IPs on the internet
        self.local = True
        remoteport = False

        for seenTime, stream, _, ip, port in addresses:
            decodedIP = protocol.checkIPAddress(bytes(ip))
            if stream not in state.streamsInWhichIAmParticipating:
                continue
            if (seenTime < time.time() - self.maxTimeOffset
                    or seenTime > time.time() + self.maxTimeOffset):
                continue
            if decodedIP is False:
                # if the address isn't local, interpret it as
                # the host's own announcement
                remoteport = port
        if remoteport is False:
            return True
        logger.debug(
            "received peer discovery from {}:{} (port {}):".format(
                self.destination.host, self.destination.port, remoteport))
        if self.local:
            state.discoveredPeers[Peer(self.destination.host, remoteport)] = \
                time.time()
        return True

    def bm_command_portcheck(self):
        return True

    def bm_command_ping(self):
        return True

    def bm_command_pong(self):
        return True

    def bm_command_verack(self):
        return True

    def bm_command_version(self):
        return True

    def handle_connect(self):
        return

    def writable(self):
        return self.write_buf

    def readable(self):
        return len(self.read_buf) < self._buf_len

    def handle_read(self):
        try:
            (recdata, addr) = self.socket.recvfrom(self._buf_len)
        except socket.error as e:
            logger.error("socket error: %s", e)
            return

        self.destination = Peer(*addr)
        encodedAddr = protocol.encodeHost(addr[0])
        self.local = bool(protocol.checkIPAddress(encodedAddr, True))
        # overwrite the old buffer to avoid mixing data and so that
        # self.local works correctly
        self.read_buf[0:] = recdata
        self.bm_proto_reset()
        receiveDataQueue.put(self.listening)

    def handle_write(self):
        try:
            retval = self.socket.sendto(
                self.write_buf, ('<broadcast>', self.port))
        except socket.error as e:
            logger.error("socket error on sendto: %s", e)
            if e.errno == 101:
                self.announcing = False
                self.socket.close()
            retval = 0
        self.slice_write_buf(retval)
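
For context, the announce path above boils down to a broadcast datagram on the Bitmessage UDP port. An illustrative standalone equivalent (assumes a network that permits broadcast; the payload here is a placeholder, not a real protocol message):

    import socket

    PORT = 8444  # matches UDPSocket.port

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.sendto(b'placeholder', ('<broadcast>', PORT))
    sock.close()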
@ -1,71 +0,0 @@
"""
`UploadThread` class definition
"""
import time

import helper_random
import protocol
from inventory import Inventory
from network.connectionpool import BMConnectionPool
from network.dandelion import Dandelion
from network.randomtrackingdict import RandomTrackingDict

from network.threads import StoppableThread


class UploadThread(StoppableThread):
    """
    This is a thread that uploads the objects that the peers requested from me
    """
    maxBufSize = 2097152  # 2MB
    name = "Uploader"

    def run(self):
        while not self._stopped:
            uploaded = 0
            # Choose uploading peers randomly
            connections = BMConnectionPool().establishedConnections()
            helper_random.randomshuffle(connections)
            for i in connections:
                now = time.time()
                # avoid unnecessary delay
                if i.skipUntil >= now:
                    continue
                if len(i.write_buf) > self.maxBufSize:
                    continue
                try:
                    request = i.pendingUpload.randomKeys(
                        RandomTrackingDict.maxPending)
                except KeyError:
                    continue
                payload = bytearray()
                chunk_count = 0
                for chunk in request:
                    del i.pendingUpload[chunk]
                    if Dandelion().hasHash(chunk) and \
                            i != Dandelion().objectChildStem(chunk):
                        i.antiIntersectionDelay()
                        self.logger.info(
                            '%s asked for a stem object we didn\'t offer to it.',
                            i.destination)
                        break
                    try:
                        payload.extend(protocol.CreatePacket(
                            'object', Inventory()[chunk].payload))
                        chunk_count += 1
                    except KeyError:
                        i.antiIntersectionDelay()
                        self.logger.info(
                            '%s asked for an object we don\'t have.',
                            i.destination)
                        break
                if not chunk_count:
                    continue
                i.append_write_buf(payload)
                self.logger.debug(
                    '%s:%i Uploading %i objects',
                    i.destination.host, i.destination.port, chunk_count)
                uploaded += chunk_count
            if not uploaded:
                self.stop.wait(1)
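
The randomKeys() call above is what bounds each pass: every peer hands back at most maxPending inventory hashes in random order. A rough stand-in for that behaviour, under the assumption that the pending store acts like a dict of hashes (this helper is ours, for illustration only):

    import random

    def random_keys(pending, n):
        """Return up to n keys of `pending` in random order.

        Raises KeyError when nothing is pending, mirroring how
        RandomTrackingDict.randomKeys is used in the loop above.
        """
        if not pending:
            raise KeyError("no pending uploads")
        keys = list(pending)
        random.shuffle(keys)
        return keys[:n]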
@ -1,254 +0,0 @@
"""
Some shared functions

.. deprecated:: 0.6.3
   Should be moved to different places and this file removed,
   but it needs refactoring.
"""
from __future__ import division

# Libraries.
import hashlib
import os
import stat
import subprocess
import sys
from binascii import hexlify
from pyelliptic import arithmetic
from kivy.utils import platform

# Project imports.
import highlevelcrypto
import state
from addresses import decodeAddress, encodeVarint
from bmconfigparser import BMConfigParser
from debug import logger
from helper_sql import sqlQuery
# pylint: disable=logging-format-interpolation

myECCryptorObjects = {}
MyECSubscriptionCryptorObjects = {}
# The key in this dictionary is the RIPE hash which is encoded
# in an address and value is the address itself.
myAddressesByHash = {}
# The key in this dictionary is the tag generated from the address.
myAddressesByTag = {}
broadcastSendersForWhichImWatching = {}


def isAddressInMyAddressBook(address):
    """Is address in my addressbook?"""
    queryreturn = sqlQuery(
        '''select address from addressbook where address=?''',
        address)
    return queryreturn != []


# At this point we should really just have a isAddressInMy(book, address)...
def isAddressInMySubscriptionsList(address):
    """Am I subscribed to this address?"""
    queryreturn = sqlQuery(
        '''select * from subscriptions where address=?''',
        str(address))
    return queryreturn != []


def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
    """
    Am I subscribed to this address, is it in my addressbook or whitelist?
    """
    if isAddressInMyAddressBook(address):
        return True

    queryreturn = sqlQuery(
        '''SELECT address FROM whitelist where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True

    queryreturn = sqlQuery(
        '''select address from subscriptions where address=?'''
        ''' and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True
    return False


def decodeWalletImportFormat(WIFstring):
    # pylint: disable=inconsistent-return-statements
    """
    Convert private key from base58 that's used in the config file to
    8-bit binary string
    """
    fullString = arithmetic.changebase(WIFstring, 58, 256)
    privkey = fullString[:-4]
    if fullString[-4:] != \
            hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        logger.critical(
            'Major problem! When trying to decode one of your'
            ' private keys, the checksum failed. Here are the first'
            ' 6 characters of the PRIVATE key: {}'.format(str(WIFstring)[:6])
        )
        os._exit(0)  # pylint: disable=protected-access
    if privkey[0:1] == '\x80'.encode()[1:]:  # checksum passed
        return privkey[1:]

    logger.critical(
        'Major problem! When trying to decode one of your private keys,'
        ' the checksum passed but the key doesn\'t begin with hex 80.'
        ' Here is the PRIVATE key: {}'.format(WIFstring)
    )
    os._exit(0)  # pylint: disable=protected-access
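
The checksum rule used above, shown in isolation: the last four bytes of the base58-decoded string must equal the first four bytes of the double SHA-256 of the rest. A minimal sketch (`decoded` stands in for arithmetic.changebase(WIFstring, 58, 256)):

    import hashlib

    def wif_checksum_ok(decoded):
        """True if the trailing 4-byte checksum matches the payload."""
        payload, checksum = decoded[:-4], decoded[-4:]
        digest = hashlib.sha256(hashlib.sha256(payload).digest()).digest()
        return digest[:4] == checksum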

def reloadMyAddressHashes():
    """Reload keys for user's addresses from the config file"""
    logger.debug('reloading keys from keys.dat file')

    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    # myPrivateKeys.clear()

    keyfileSecure = checkSensitiveFilePermissions(os.path.join(
        state.appdata, 'keys.dat'))
    hasEnabledKeys = False
    for addressInKeysFile in BMConfigParser().addresses(hidden=True):
        isEnabled = BMConfigParser().safeGet(addressInKeysFile, 'enabled')
        if isEnabled:
            hasEnabledKeys = True
            # status
            addressVersionNumber, streamNumber, hashobj = \
                decodeAddress(addressInKeysFile)[1:]
            if addressVersionNumber in (2, 3, 4):
                # Returns a simple 32 bytes of information encoded
                # in 64 Hex characters, or null if there was an error.
                privEncryptionKey = hexlify(decodeWalletImportFormat(
                    BMConfigParser().get(addressInKeysFile, 'privencryptionkey')))
                # It is 32 bytes encoded as 64 hex characters
                if len(privEncryptionKey) == 64:
                    myECCryptorObjects[hashobj] = \
                        highlevelcrypto.makeCryptor(privEncryptionKey)
                    myAddressesByHash[hashobj] = addressInKeysFile
                    tag = hashlib.sha512(hashlib.sha512(
                        encodeVarint(addressVersionNumber) +
                        encodeVarint(streamNumber) + hashobj).digest()).digest()[32:]
                    myAddressesByTag[tag] = addressInKeysFile
            else:
                logger.error(
                    'Error in reloadMyAddressHashes: Can\'t handle'
                    ' address versions other than 2, 3, or 4.\n'
                )

    if not platform == "android":
        if not keyfileSecure:
            fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys)


def reloadBroadcastSendersForWhichImWatching():
    """
    Reinitialize runtime data for the broadcasts I'm subscribed to
    from the config file
    """
    broadcastSendersForWhichImWatching.clear()
    MyECSubscriptionCryptorObjects.clear()
    queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1')
    logger.debug('reloading subscriptions...')
    for row in queryreturn:
        address, = row
        # status
        addressVersionNumber, streamNumber, hashobj = decodeAddress(address)[1:]
        if addressVersionNumber == 2:
            broadcastSendersForWhichImWatching[hashobj] = 0
        # Now, for all addresses, even version 2 addresses,
        # we should create Cryptor objects in a dictionary which we will
        # use to attempt to decrypt encrypted broadcast messages.
        if addressVersionNumber <= 3:
            privEncryptionKey = hashlib.sha512(
                encodeVarint(addressVersionNumber) +
                encodeVarint(streamNumber) + hashobj
            ).digest()[:32]
            MyECSubscriptionCryptorObjects[hashobj] = \
                highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
        else:
            doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
                encodeVarint(addressVersionNumber) +
                encodeVarint(streamNumber) + hashobj
            ).digest()).digest()
            tag = doubleHashOfAddressData[32:]
            privEncryptionKey = doubleHashOfAddressData[:32]
            MyECSubscriptionCryptorObjects[tag] = \
                highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
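
The v4+ branch above derives both secrets from one double SHA-512. A compact restatement (the helper name is ours; encodeVarint is the real function from addresses):

    import hashlib

    from addresses import encodeVarint

    def derive_broadcast_key_and_tag(version, stream, ripe):
        """First 32 bytes: private encryption key; last 32 bytes: tag."""
        double_hash = hashlib.sha512(hashlib.sha512(
            encodeVarint(version) + encodeVarint(stream) + ripe
        ).digest()).digest()
        return double_hash[:32], double_hash[32:]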

def fixPotentiallyInvalidUTF8Data(text):
    """Sanitise invalid UTF-8 strings"""
    try:
        unicode(text, 'utf-8')
        return text
    except:
        return 'Part of the message is corrupt. The message cannot be' \
            ' displayed the normal way.\n\n' + repr(text)


def checkSensitiveFilePermissions(filename):
    """
    :param str filename: path to the file
    :return: True if file appears to have appropriate permissions.
    """
    if sys.platform == 'win32':
        # .. todo:: This might deserve extra checks by someone familiar with
        # Windows systems.
        return True
    elif sys.platform[:7] == 'freebsd':
        # FreeBSD file systems are the same as major Linux file systems
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
    try:
        # Skip known problems for non-Win32 filesystems
        # without POSIX permissions.
        fstype = subprocess.check_output(
            'stat -f -c "%%T" %s' % (filename),
            shell=True,
            stderr=subprocess.STDOUT
        )
        if 'fuseblk'.encode() in fstype:
            logger.info(
                'Skipping file permissions check for %s.'
                ' Filesystem fuseblk detected.', filename)
            return True
    except:
        # Swallow exception here, but we might run into trouble later!
        logger.error('Could not determine filesystem type. %s', filename)
    present_permissions = os.stat(filename)[0]
    disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
    return present_permissions & disallowed_permissions == 0
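
The permission test above, reduced to its core: a sensitive file passes only when no group or other bits are set (i.e. mode 0600 or stricter). For illustration:

    import os
    import stat

    def is_owner_only(filename):
        """True if neither group nor other has any permission bits."""
        mode = os.stat(filename)[0]
        return mode & (stat.S_IRWXG | stat.S_IRWXO) == 0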


# Fixes permissions on a sensitive file.
def fixSensitiveFilePermissions(filename, hasEnabledKeys):
    """Try to change file permissions to be more restrictive"""
    if hasEnabledKeys:
        logger.warning(
            'Keyfile had insecure permissions, and there were enabled'
            ' keys. The truly paranoid should stop using them immediately.')
    else:
        logger.warning(
            'Keyfile had insecure permissions, but there were no enabled keys.'
        )
    try:
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        allowed_permissions = ((1 << 32) - 1) ^ disallowed_permissions
        new_permissions = (
            allowed_permissions & present_permissions)
        os.chmod(filename, new_permissions)

        logger.info('Keyfile permissions automatically fixed.')

    except Exception:
        logger.exception('Keyfile permissions could not be fixed.')
        raise
@ -5,10 +5,6 @@ import threading
import time

from pybitmessage import state
# from debug import logger
# from helper_sql import sqlQuery, sqlStoredProcedure
# from inventory import Inventory
# from knownnodes import saveKnownNodes
from pybitmessage.network.threads import StoppableThread
from pybitmessage.queues import (
    addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue)
@ -39,13 +35,12 @@ def doCleanShutdown():
        'updateStatusBar',
        'Flushing inventory in memory out to disk.'
        ' This should normally only take a second...'))
    # Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have
    # incremented the shutdown variable from 1 to 2. This must finish before
    # we command the sqlThread to exit.
    while state.shutdown == 1:
        time.sleep(.1)
    # while state.shutdown == 1:
    #     time.sleep(.1)

    # Wait long enough to guarantee that any running proof of work worker
    # threads will check the shutdown variable and exit. If the main thread