From 69a7dc594aa8d080deb1f7d13107002638d6714c Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Thu, 3 Oct 2019 12:13:02 +0300 Subject: [PATCH 01/70] Ignore deprecated flake8 W503 --- setup.cfg | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 3236eed1..a4e0547c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,14 +1,17 @@ # Since there is overlap in the violations that the different tools check for, it makes sense to quiesce some warnings # in some tools if those warnings in other tools are preferred. This avoids the need to add duplicate lint warnings. +# max-line-length should be removed ASAP! + [pycodestyle] max-line-length = 119 [flake8] max-line-length = 119 -ignore = E722,F841 +ignore = E722,F841,W503 # E722: pylint is preferred for bare-except # F841: pylint is preferred for unused-variable +# W503: deprecated: https://bugs.python.org/issue26763 - https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator # pylint honours the [MESSAGES CONTROL] section # as well as [MASTER] section From 5cf8ef06cc5262c68c7d2ae00f5b25b23ab69ed8 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 16 Oct 2019 15:07:39 +0300 Subject: [PATCH 02/70] A symlink for famous setuptools bug https://bitbucket.org/tarek/distribute/issues/177 --- pybitmessage | 1 + 1 file changed, 1 insertion(+) create mode 120000 pybitmessage diff --git a/pybitmessage b/pybitmessage new file mode 120000 index 00000000..e8310385 --- /dev/null +++ b/pybitmessage @@ -0,0 +1 @@ +src \ No newline at end of file From c99997dbb9f0ebe1352357bb5b40ed52e66d8b3f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sat, 12 Oct 2019 17:12:19 +0300 Subject: [PATCH 03/70] Fix mistakes in Exception() instantiation --- src/class_smtpServer.py | 11 ++++++----- src/namecoin.py | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/class_smtpServer.py b/src/class_smtpServer.py index d87ab69b..99d9c4b3 100644 --- a/src/class_smtpServer.py +++ b/src/class_smtpServer.py @@ -21,6 +21,7 @@ from version import softwareVersion SMTPDOMAIN = "bmaddr.lan" LISTENPORT = 8425 + class smtpServerChannel(smtpd.SMTPChannel): def smtp_EHLO(self, arg): if not arg: @@ -113,9 +114,9 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): try: sender, domain = p.sub(r'\1', mailfrom).split("@") if domain != SMTPDOMAIN: - raise Exception("Bad domain %s", domain) + raise Exception("Bad domain %s" % domain) if sender not in BMConfigParser().addresses(): - raise Exception("Nonexisting user %s", sender) + raise Exception("Nonexisting user %s" % sender) except Exception as err: logger.debug("Bad envelope from %s: %s", mailfrom, repr(err)) msg_from = self.decode_header("from") @@ -123,9 +124,9 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): msg_from = p.sub(r'\1', self.decode_header("from")[0]) sender, domain = msg_from.split("@") if domain != SMTPDOMAIN: - raise Exception("Bad domain %s", domain) + raise Exception("Bad domain %s" % domain) if sender not in BMConfigParser().addresses(): - raise Exception("Nonexisting user %s", sender) + raise Exception("Nonexisting user %s" % sender) except Exception as err: logger.error("Bad headers from %s: %s", msg_from, repr(err)) return @@ -145,7 +146,7 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): try: rcpt, domain = p.sub(r'\1', to).split("@") if domain != SMTPDOMAIN: - raise Exception("Bad domain %s", domain) + raise Exception("Bad domain %s" % domain) 
logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject) self.send(sender, rcpt, msg_subject, body) logger.info("Relayed %s to %s", sender, rcpt) diff --git a/src/namecoin.py b/src/namecoin.py index 6674bdcd..579fb0ab 100644 --- a/src/namecoin.py +++ b/src/namecoin.py @@ -258,7 +258,7 @@ class namecoinConnection(object): resp = self.con.getresponse() result = resp.read() if resp.status != 200: - raise Exception("Namecoin returned status %i: %s" % resp.status, resp.reason) + raise Exception("Namecoin returned status %i: %s" % (resp.status, resp.reason)) except: logger.info("HTTP receive error") except: @@ -288,7 +288,7 @@ class namecoinConnection(object): return result except socket.error as exc: - raise Exception("Socket error in RPC connection: %s" % str(exc)) + raise Exception("Socket error in RPC connection: %s" % exc) def lookupNamecoinFolder(): From 9a3a5ec9e8886ae3163ded9059526c69b544acd2 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 9 Oct 2019 11:46:47 +0300 Subject: [PATCH 04/70] Adjusted conf and rst to fix modindex and get informative index --- docs/conf.py | 114 ++++++++++++++----- docs/contribute.dir/develop.dir/fabric.rst | 2 +- docs/contribute.dir/develop.dir/overview.rst | 2 +- docs/index.rst | 12 +- 4 files changed, 98 insertions(+), 32 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a4dae7c7..96c9f146 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -2,35 +2,24 @@ """ Configuration file for the Sphinx documentation builder. -This file does only contain a selection of the most common options. For a -full list see the documentation: +For a full list of options see the documentation: http://www.sphinx-doc.org/en/master/config - --- Path setup -------------------------------------------------------------- - -If extensions (or modules to document with autodoc) are in another directory, -add these directories to sys.path here. If the directory is relative to the -documentation root, use os.path.abspath to make it absolute, like shown here. """ import os import sys -from sphinx.apidoc import main -from mock import Mock as MagicMock - -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('../src')) -sys.path.insert(0, os.path.abspath('../src/pyelliptic')) -import version +from importlib import import_module + +import version # noqa:E402 # -- Project information ----------------------------------------------------- project = u'PyBitmessage' -copyright = u'2018, The Bitmessage Team' # pylint: disable=redefined-builtin +copyright = u'2019, The Bitmessage Team' # pylint: disable=redefined-builtin author = u'The Bitmessage Team' # The short X.Y version @@ -50,12 +39,13 @@ release = version # ones. extensions = [ 'sphinx.ext.autodoc', - # 'sphinx.ext.doctest', # Currently disabled due to bad doctests + 'sphinx.ext.coverage', # FIXME: unused + 'sphinx.ext.imgmath', # legacy unused 'sphinx.ext.intersphinx', + 'sphinx.ext.linkcode', + 'sphinx.ext.napoleon', 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.imgmath', - 'sphinx.ext.viewcode', + 'sphinxcontrib.apidoc', 'm2r', ] @@ -75,23 +65,29 @@ master_doc = 'index' # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +# language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# This pattern also affects html_static_path and html_extra_path . -exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ['_build'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' +# Don't prepend every class or function name with full module path +add_module_names = False + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['pybitmessage.'] + # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -114,10 +110,7 @@ html_static_path = ['_static'] # # html_sidebars = {} -# Deal with long lines in source view -html_theme_options = { - 'page_width': '1366px', -} +html_show_sourcelink = False # -- Options for HTMLHelp output --------------------------------------------- @@ -199,10 +192,75 @@ epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- +autodoc_mock_imports = [ + 'debug', + 'pybitmessage.bitmessagekivy', + 'pybitmessage.bitmessagemain', + 'pybitmessage.bitmessageqt.addressvalidator', + 'pybitmessage.helper_startup', + 'pybitmessage.network.httpd', + 'pybitmessage.network.https', + 'ctypes', + 'dialog', + 'gi', + 'kivy', + 'logging', + 'msgpack', + 'numpy', + 'pkg_resources', + 'pycanberra', + 'pyopencl', + 'PyQt4', + 'pyxdg', + 'qrcode', + 'stem', +] + +# Apidoc settings +apidoc_module_dir = '../pybitmessage' +apidoc_output_dir = 'autodoc' +apidoc_excluded_paths = [ + 'bitmessagekivy', 'bitmessagemain.py', 'build_osx.py', + 'bitmessageqt/addressvalidator.py', 'bitmessageqt/migrationwizard.py', + 'bitmessageqt/newaddresswizard.py', + 'class_objectProcessor.py', 'defaults.py', 'helper_startup.py', + 'kivymd', 'main.py', 'navigationdrawer', 'network/http*', + 'pybitmessage', 'queues.py', 'tests', 'version.py' +] +apidoc_module_first = True +apidoc_separate_modules = True +apidoc_toc_file = False +apidoc_extra_args = ['-a'] + +# Napoleon settings +napoleon_google_docstring = True + + +# linkcode function +def linkcode_resolve(domain, info): + """This generates source URL's for sphinx.ext.linkcode""" + if domain != 'py' or not info['module']: + return + try: + home = os.path.abspath(import_module('pybitmessage').__path__[0]) + mod = import_module(info['module']).__file__ + except ImportError: + return + repo = 'https://github.com/Bitmessage/PyBitmessage/blob/v0.6/src%s' + path = mod.replace(home, '') + if path != mod: + # put the link only for top level definitions + if len(info['fullname'].split('.')) > 1: + return + if path.endswith('.pyc'): + path = path[:-1] + return repo % path + + # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = {'https://docs.python.org/2.7/': None} # -- Options for todo extension ---------------------------------------------- diff --git a/docs/contribute.dir/develop.dir/fabric.rst b/docs/contribute.dir/develop.dir/fabric.rst index 434ccf7b..8003f33a 100644 --- a/docs/contribute.dir/develop.dir/fabric.rst +++ b/docs/contribute.dir/develop.dir/fabric.rst @@ -1,2 +1,2 @@ -.. 
mdinclude:: fabfile/README.md +.. mdinclude:: ../../../fabfile/README.md diff --git a/docs/contribute.dir/develop.dir/overview.rst b/docs/contribute.dir/develop.dir/overview.rst index e83d884b..342c9dbb 100644 --- a/docs/contribute.dir/develop.dir/overview.rst +++ b/docs/contribute.dir/develop.dir/overview.rst @@ -62,7 +62,7 @@ To re-build them, run `fab build_docs:dep_graphs=true`. Note that the dot graph .. figure:: ../../../../_static/deps-sfdp.png :alt: SFDP graph of dependencies :width: 100 pc - + :index:`SFDP` graph of dependencies .. figure:: ../../../../_static/deps-dot.png diff --git a/docs/index.rst b/docs/index.rst index 9dddfa28..cc8c9523 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,12 +1,20 @@ .. mdinclude:: ../README.md +Documentation +------------- +.. toctree:: + :maxdepth: 3 + + autodoc/pybitmessage + +Legacy pages +------------ .. toctree:: :maxdepth: 2 - overview usage contribute - + Indices and tables ------------------ From d412e8341b5adf640b98bfa932c56d0c2f476864 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 9 Oct 2019 11:12:21 +0300 Subject: [PATCH 05/70] Create requirements.txt for readthedocs --- docs/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..55219ec5 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +m2r +sphinxcontrib-apidoc From b5df2421417e4460f3ef6791316616d25454ce0f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Thu, 10 Oct 2019 16:38:13 +0300 Subject: [PATCH 06/70] Fixed badly formatted docstrings and some wrong text --- src/api.py | 6 +----- src/class_singleCleaner.py | 17 ++++++++-------- src/highlevelcrypto.py | 6 +++--- src/namecoin.py | 41 ++++++++++++++++++-------------------- src/network/bmobject.py | 4 +--- src/protocol.py | 10 +++------- src/pyelliptic/__init__.py | 13 +++++++----- src/pyelliptic/hash.py | 5 +---- src/pyelliptic/openssl.py | 10 ++++------ 9 files changed, 49 insertions(+), 63 deletions(-) diff --git a/src/api.py b/src/api.py index fad5d623..f92abeb4 100644 --- a/src/api.py +++ b/src/api.py @@ -1,19 +1,15 @@ # pylint: disable=too-many-locals,too-many-lines,no-self-use,too-many-public-methods,too-many-branches # pylint: disable=too-many-statements -""" -src/api.py -========== # Copyright (c) 2012-2016 Jonathan Warren # Copyright (c) 2012-2019 The Bitmessage developers +""" This is not what you run to run the Bitmessage API. Instead, enable the API ( https://bitmessage.org/wiki/API ) and optionally enable daemon mode ( https://bitmessage.org/wiki/Daemon ) then run bitmessagemain.py. """ -from __future__ import absolute_import - import base64 import errno import hashlib diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index a5938716..49e15f49 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -2,19 +2,20 @@ The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy. 
+ It cleans these data structures in memory: -inventory (moves data to the on-disk sql database) -inventorySets (clears then reloads data out of sql database) + - inventory (moves data to the on-disk sql database) + - inventorySets (clears then reloads data out of sql database) It cleans these tables on the disk: -inventory (clears expired objects) -pubkeys (clears pubkeys older than 4 weeks old which we have not used - personally) -knownNodes (clears addresses which have not been online for over 3 days) + - inventory (clears expired objects) + - pubkeys (clears pubkeys older than 4 weeks old which we have not used + personally) + - knownNodes (clears addresses which have not been online for over 3 days) It resends messages when there has been no response: -resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...) -resends msg messages in 5 days (then 10 days, then 20 days, etc...) + - resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...) + - resends msg messages in 5 days (then 10 days, then 20 days, etc...) """ diff --git a/src/highlevelcrypto.py b/src/highlevelcrypto.py index 02fb85ab..3d894ae8 100644 --- a/src/highlevelcrypto.py +++ b/src/highlevelcrypto.py @@ -100,9 +100,9 @@ def pointMult(secret): Evidently, this type of error can occur very rarely: - File "highlevelcrypto.py", line 54, in pointMult - group = OpenSSL.EC_KEY_get0_group(k) - WindowsError: exception: access violation reading 0x0000000000000008 + >>> File "highlevelcrypto.py", line 54, in pointMult + >>> group = OpenSSL.EC_KEY_get0_group(k) + >>> WindowsError: exception: access violation reading 0x0000000000000008 """ while True: try: diff --git a/src/namecoin.py b/src/namecoin.py index 579fb0ab..c9238f63 100644 --- a/src/namecoin.py +++ b/src/namecoin.py @@ -1,31 +1,28 @@ # pylint: disable=too-many-branches,protected-access """ Copyright (C) 2013 by Daniel Kraft -This file is part of the Bitmessage project. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -.. todo:: from debug import logger crashes PyBitmessage due to a circular dependency. The debug module will also -override/disable logging.getLogger() # loggers so module level logging functions are used instead +Namecoin queries """ +# This file is part of the Bitmessage project. 
-
-from __future__ import absolute_import
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
 
 import base64
 import httplib
diff --git a/src/network/bmobject.py b/src/network/bmobject.py
index 0a4c12b7..e19eaac9 100644
--- a/src/network/bmobject.py
+++ b/src/network/bmobject.py
@@ -1,7 +1,5 @@
 """
-src/network/bmobject.py
-======================
-
+BMObject and its exceptions.
 """
 
 import time
diff --git a/src/protocol.py b/src/protocol.py
index 1031b950..ef101a72 100644
--- a/src/protocol.py
+++ b/src/protocol.py
@@ -1,13 +1,8 @@
 # pylint: disable=too-many-boolean-expressions,too-many-return-statements,too-many-locals,too-many-statements
 """
-protocol.py
-===========
-
 Low-level protocol-related functions.
 """
-from __future__ import absolute_import
-
 import base64
 import hashlib
 import random
@@ -205,12 +200,13 @@ def isProofOfWorkSufficient(data, recvTime=0):
     """
     Validate an object's Proof of Work using method described in:
-    https://bitmessage.org/wiki/Proof_of_work
+    https://bitmessage.org/wiki/Proof_of_work
+
     Arguments:
         int nonceTrialsPerByte (default: from default.py)
         int payloadLengthExtraBytes (default: from default.py)
         float recvTime (optional) UNIX epoch time when object was
-    received from the network (default: current system time)
+            received from the network (default: current system time)
     Returns:
         True if PoW valid and sufficient, False in all other cases
     """
diff --git a/src/pyelliptic/__init__.py b/src/pyelliptic/__init__.py
index 7aa666e0..65279ded 100644
--- a/src/pyelliptic/__init__.py
+++ b/src/pyelliptic/__init__.py
@@ -1,10 +1,13 @@
 """
-src/pyelliptic/__init__.py
-=====================================
+Copyright (C) 2010
+Author: Yann GUIBET
+Contact:
+
+Python OpenSSL wrapper.
+For modern cryptography with ECC, AES, HMAC, Blowfish, ...
+
+This is an abandoned package maintained inside of PyBitmessage.
 """
-# Copyright (C) 2010
-# Author: Yann GUIBET
-# Contact:
 
 from .openssl import OpenSSL
 from .ecc import ECC
diff --git a/src/pyelliptic/hash.py b/src/pyelliptic/hash.py
index c21dd6a4..f098d631 100644
--- a/src/pyelliptic/hash.py
+++ b/src/pyelliptic/hash.py
@@ -1,8 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
 """
-src/pyelliptic/hash.py
-=====================
+Wrappers for hash functions from OpenSSL.
 """
 # Copyright (C) 2011 Yann GUIBET
 # See LICENSE for details.
diff --git a/src/pyelliptic/openssl.py b/src/pyelliptic/openssl.py
index fcde01ec..152a780c 100644
--- a/src/pyelliptic/openssl.py
+++ b/src/pyelliptic/openssl.py
@@ -1,14 +1,12 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-src/pyelliptic/openssl.py
-=====================
-"""
 # Copyright (C) 2011 Yann GUIBET
 # See LICENSE for details.
 #
 # Software slightly changed by Jonathan Warren
 # pylint: disable=protected-access
+"""
+This module loads openssl libs with ctypes and encapsulates
+needed openssl functionality in class _OpenSSL.
+"""
 
 import sys
 import ctypes

From 4d15c8e590309a0c009ca6a2c8b763e9ef0e526b Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Sun, 13 Oct 2019 13:10:06 +0300
Subject: [PATCH 07/70] Fix fallback package docstring

---
 src/fallback/__init__.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/src/fallback/__init__.py b/src/fallback/__init__.py
index d45c754d..9a8d646f 100644
--- a/src/fallback/__init__.py
+++ b/src/fallback/__init__.py
@@ -1,13 +1,19 @@
 """
-.. todo:: hello world
+Fallback expressions help PyBitmessage modules to run without some external
+dependencies.
+
+
+RIPEMD160Hash
+-------------
+
+We need to check :mod:`hashlib` for RIPEMD-160, as it won't be available
+if OpenSSL is not linked against or the linked OpenSSL has RIPEMD disabled.
+Try to use `pycryptodome `_
+in that case.
 """
 
 import hashlib
 
-# We need to check hashlib for RIPEMD-160, as it won't be available
-# if OpenSSL is not linked against or the linked OpenSSL has RIPEMD
-# disabled.
-
 try:
     hashlib.new('ripemd160')
 except ValueError:

From 53cc08edec9d092c525537f873d0fb05b25c79b8 Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Thu, 10 Oct 2019 17:31:02 +0300
Subject: [PATCH 08/70] Renamed invalid python module http-old

---
 src/network/{http-old.py => http_old.py} | 4 ----
 1 file changed, 4 deletions(-)
 rename src/network/{http-old.py => http_old.py} (96%)

diff --git a/src/network/http-old.py b/src/network/http_old.py
similarity index 96%
rename from src/network/http-old.py
rename to src/network/http_old.py
index c97927d9..64d09983 100644
--- a/src/network/http-old.py
+++ b/src/network/http_old.py
@@ -1,7 +1,3 @@
-"""
-src/network/http-old.py
-=======================
-"""
 import asyncore
 import socket
 import time

From 9e72e3b2afa858dbca3e50535f88110f65d8f16b Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Fri, 11 Oct 2019 14:14:08 +0300
Subject: [PATCH 09/70] Rewritten epytext strings in qidenticon and removed __all__

---
 src/qidenticon.py | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/src/qidenticon.py b/src/qidenticon.py
index 8db8430a..deafc570 100644
--- a/src/qidenticon.py
+++ b/src/qidenticon.py
@@ -1,23 +1,19 @@
-#!/usr/bin/env python
-# -*- coding:utf-8 -*-
 # pylint: disable=too-many-locals,too-many-arguments,too-many-function-args
 """
-= usage =
+Usage
+-----
 
-== python ==
 >>> import qtidenticon
 >>> qtidenticon.render_identicon(code, size)
 
 Return a PIL Image class instance which have generated identicon image.
-```size``` specifies `patch size`. Generated image size is 3 * ```size```.
+``size`` specifies `patch size`. Generated image size is 3 * ``size``.
""" from PyQt4 import QtGui from PyQt4.QtCore import QSize, QPointF, Qt from PyQt4.QtGui import QPixmap, QPainter, QPolygonF -__all__ = ['render_identicon', 'IdenticonRendererBase'] - class IdenticonRendererBase(object): """Encapsulate methods around rendering identicons""" @@ -26,7 +22,7 @@ class IdenticonRendererBase(object): def __init__(self, code): """ - @param code code for icon + :param code: code for icon """ if not isinstance(code, int): code = int(code) @@ -36,8 +32,8 @@ class IdenticonRendererBase(object): """ render identicon to QPicture - @param size identicon patchsize. (image size is 3 * [size]) - @return QPicture + :param size: identicon patchsize. (image size is 3 * [size]) + :returns: :class:`QPicture` """ # decode the code @@ -79,7 +75,7 @@ class IdenticonRendererBase(object): def drawPatchQt(self, pos, turn, invert, patch_type, image, size, foreColor, backColor, penwidth): # pylint: disable=unused-argument """ - @param size patch size + :param size: patch size """ path = self.PATH_SET[patch_type] if not path: @@ -134,7 +130,7 @@ class IdenticonRendererBase(object): class DonRenderer(IdenticonRendererBase): """ Don Park's implementation of identicon - see : http://www.docuverse.com/blog/donpark/2007/01/19/identicon-updated-and-source-released + see: http://www.docuverse.com/blog/donpark/2007/01/19/identicon-updated-and-source-released """ PATH_SET = [ From 7ba296a6fe6c6eef601de6a2725b917d076cfe23 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Fri, 11 Oct 2019 17:15:26 +0300 Subject: [PATCH 10/70] Remove "Edit on Github" link: https://github.com/sphinx-doc/sphinx/issues/2386 --- docs/_static/custom.css | 4 ++++ docs/conf.py | 4 ++++ 2 files changed, 8 insertions(+) create mode 100644 docs/_static/custom.css diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 00000000..5192985c --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,4 @@ +/* Hide "On GitHub" section from versions menu */ +li.wy-breadcrumbs-aside > a.fa { + display: none; +} diff --git a/docs/conf.py b/docs/conf.py index 96c9f146..b6e75cc1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -100,6 +100,10 @@ html_theme = 'sphinx_rtd_theme' # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] +html_css_files = [ + 'custom.css', +] + # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
 #

From 86932617bd50d8b46070845d366093af53e2a242 Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Fri, 11 Oct 2019 18:50:35 +0300
Subject: [PATCH 11/70] Add setuptools sphinx integration

---
 .gitignore |  1 +
 setup.py   | 14 ++++++--------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2bcb5340..701bb079 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,5 @@ dist
 *.egg-info
 docs/_*/*
 docs/autodoc/
+build/sphinx/
 pyan/
diff --git a/setup.py b/setup.py
index 61afa91e..3e585b6b 100644
--- a/setup.py
+++ b/setup.py
@@ -17,13 +17,7 @@ EXTRAS_REQUIRE = {
     'qrcode': ['qrcode'],
     'sound;platform_system=="Windows"': ['winsound'],
     'tor': ['stem'],
-    'docs': [
-        'sphinx',  # fab build_docs
-        'graphviz',  # fab build_docs
-        'curses',  # src/depends.py
-        'python2-pythondialog',  # src/depends.py
-        'm2r',  # fab build_docs
-    ]
+    'docs': ['sphinx', 'sphinxcontrib-apidoc', 'm2r']
 }
 
 
@@ -155,5 +149,9 @@ if __name__ == "__main__":
         #     ]
         },
         scripts=['src/pybitmessage'],
-        cmdclass={'install': InstallCmd}
+        cmdclass={'install': InstallCmd},
+        command_options={
+            'build_sphinx': {
+                'source_dir': ('setup.py', 'docs')}
+        }
     )

From 5cf4d8a946db2095156f8653b69e7b06beabbee9 Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Tue, 15 Oct 2019 15:01:43 +0300
Subject: [PATCH 12/70] Update README

---
 README.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index c3dcb540..17049e7a 100644
--- a/README.md
+++ b/README.md
@@ -14,12 +14,10 @@ Development
 ----------
 Bitmessage is a collaborative project. You are welcome to submit pull requests
 although if you plan to put a non-trivial amount of work into coding new
-features, it is recommended that you first solicit feedback on the DevTalk
-pseudo-mailing list:
-BM-2D9QKN4teYRvoq2fyzpiftPh9WP9qggtzh
+features, it is recommended that you first describe your ideas in a
+separate issue.
 
-Feel welcome to join chan "bitmessage", BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY
-which is on preview here: https://beamstat.com/chan/bitmessage
+Feel welcome to join chan "bitmessage", BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY
 
 References
 ----------

From 86f0860cb21770e15791c615bf2a566be7e72db3 Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Wed, 16 Oct 2019 14:42:16 +0300
Subject: [PATCH 13/70] Slightly rewritten docstrings in singleinstance

---
 src/singleinstance.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/singleinstance.py b/src/singleinstance.py
index c2def912..03bda504 100644
--- a/src/singleinstance.py
+++ b/src/singleinstance.py
@@ -1,8 +1,13 @@
-#! /usr/bin/env python
+"""
+This is based upon the singleton class from
+`tendo <https://github.com/pycontribs/tendo>`_
+which is under the Python Software Foundation License version 2
+"""
 import atexit
 import os
 import sys
+
 import state
 
 try:
@@ -15,10 +20,6 @@ class singleinstance:
     """
     Implements a single instance application by creating a lock file
     at appdata.
- - This is based upon the singleton class from tendo - https://github.com/pycontribs/tendo - which is under the Python Software Foundation License version 2 """ def __init__(self, flavor_id="", daemon=False): self.initialized = False From c63ed02153869aeefb35502df8e60c29dba68b2f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 16 Oct 2019 17:53:37 +0300 Subject: [PATCH 14/70] Minimal changes to document Singleton and class definitions it wraps --- src/bmconfigparser.py | 6 ++++-- src/singleton.py | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/bmconfigparser.py b/src/bmconfigparser.py index 726d32eb..7f28d1b8 100644 --- a/src/bmconfigparser.py +++ b/src/bmconfigparser.py @@ -43,8 +43,10 @@ BMConfigDefaults = { @Singleton class BMConfigParser(ConfigParser.SafeConfigParser): - """Singleton class inherited from ConfigParser.SafeConfigParser - with additional methods specific to bitmessage config.""" + """ + Singleton class inherited from :class:`ConfigParser.SafeConfigParser` + with additional methods specific to bitmessage config. + """ _temp = {} diff --git a/src/singleton.py b/src/singleton.py index 1eef08e1..5c6c43be 100644 --- a/src/singleton.py +++ b/src/singleton.py @@ -1,6 +1,21 @@ +""" +Singleton decorator definition +""" + +from functools import wraps + + def Singleton(cls): + """ + Decorator implementing the singleton pattern: + it restricts the instantiation of a class to one "single" instance. + """ instances = {} + + # https://github.com/sphinx-doc/sphinx/issues/3783 + @wraps(cls) def getinstance(): + """Find an instance or save newly created one""" if cls not in instances: instances[cls] = cls() return instances[cls] From 7a89109fc917c5af530c9643aa8faca32ab99cf8 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Tue, 6 Aug 2019 14:04:33 +0300 Subject: [PATCH 15/70] New logging approach in order to reduce imports from submodules and use logging without risk of circular import. Only subpackage that imports from debug is bitmessageqt - because it also uses debug.resetLogging(). Instead of from debug import logger is now recommended to use: import logging logger = logging.getLogger('default') All subclasses of StoppableThread now have a logger attribute. All threading related stuff except for set_thread_name() was moved from helper_threading to network.threads. 
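For example, a module that defines a thread would now look like this
(a minimal illustrative sketch; DemoThread is a hypothetical name, not
a class from this patch):

    import logging

    from network.threads import StoppableThread

    logger = logging.getLogger('default')


    class DemoThread(StoppableThread):
        """Hypothetical thread showing the new logging pattern"""

        def run(self):
            # module-level code logs through the named logger
            logger.debug('%s starting', self.name)
            # every StoppableThread subclass gets a logger attribute
            self.logger.info('running')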
Fixed two of my mistakes from a previous edit of debug in a1a8d3a:
- logger.handlers is not dict but iterable
- sys.excepthook should be set unconditionally
---
 src/api.py                        |  2 +-
 src/bitmessagemain.py             |  2 +-
 src/class_addressGenerator.py     | 21 ++++---
 src/class_singleCleaner.py        | 93 +++++++++++++++----------------
 src/class_singleWorker.py         | 84 ++++++++++++++--------------
 src/class_smtpDeliver.py          | 14 ++---
 src/class_smtpServer.py           | 46 ++++++++-------
 src/debug.py                      | 86 ++++++++++++++++------------
 src/helper_threading.py           | 39 +------------
 src/messagetypes/__init__.py      | 10 ++--
 src/messagetypes/message.py       |  9 ++-
 src/messagetypes/vote.py          |  9 ++-
 src/network/addrthread.py         |  4 +-
 src/network/advanceddispatcher.py |  6 +-
 src/network/announcethread.py     | 10 ++--
 src/network/bmobject.py           |  4 +-
 src/network/bmproto.py            |  4 +-
 src/network/connectionchooser.py  |  4 +-
 src/network/connectionpool.py     |  4 +-
 src/network/dandelion.py          |  6 +-
 src/network/downloadthread.py     |  9 +--
 src/network/invthread.py          |  2 +-
 src/network/networkthread.py      | 11 +---
 src/network/proxy.py              |  7 ++-
 src/network/receivequeuethread.py | 19 +++----
 src/network/tcp.py                |  4 +-
 src/network/threads.py            | 49 ++++++++++++++++
 src/network/tls.py                |  3 +-
 src/network/udp.py                | 12 ++--
 src/network/uploadthread.py       | 31 ++++++-----
 src/shutdown.py                   | 14 ++---
 src/upnp.py                       |  2 +-
 32 files changed, 328 insertions(+), 292 deletions(-)
 create mode 100644 src/network/threads.py

diff --git a/src/api.py b/src/api.py
index f92abeb4..b7f5c62d 100644
--- a/src/api.py
+++ b/src/api.py
@@ -38,8 +38,8 @@ from bmconfigparser import BMConfigParser
 from debug import logger
 from helper_ackPayload import genAckPayload
 from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery, sqlStoredProcedure
-from helper_threading import StoppableThread
 from inventory import Inventory
+from network.threads import StoppableThread
 
 str_chan = '[chan]'
diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py
index 1dd2f271..4ad9311f 100755
--- a/src/bitmessagemain.py
+++ b/src/bitmessagemain.py
@@ -41,7 +41,7 @@ import shared
 import knownnodes
 import state
 import shutdown
-from debug import logger
+from debug import logger  # this should go before any threads
 
 # Classes
 from class_sqlThread import sqlThread
diff --git a/src/class_addressGenerator.py b/src/class_addressGenerator.py
index d930fc99..fa268377 100644
--- a/src/class_addressGenerator.py
+++ b/src/class_addressGenerator.py
@@ -12,10 +12,9 @@ import shared
 import defaults
 import highlevelcrypto
 from bmconfigparser import BMConfigParser
-from debug import logger
 from addresses import decodeAddress, encodeAddress, encodeVarint
 from fallback import RIPEMD160Hash
-from helper_threading import StoppableThread
+from network.threads import StoppableThread
 
 
 class addressGenerator(StoppableThread):
@@ -85,12 +84,12 @@ class addressGenerator(StoppableThread):
             elif queueValue[0] == 'stopThread':
                 break
             else:
-                logger.error(
+                self.logger.error(
                     'Programming error: A structure with the wrong number'
                     ' of values was passed into the addressGeneratorQueue.'
' Here is the queueValue: %r\n', queueValue) if addressVersionNumber < 3 or addressVersionNumber > 4: - logger.error( + self.logger.error( 'Program error: For some reason the address generator' ' queue has been given a request to create at least' ' one version %s address which it cannot do.\n', @@ -139,10 +138,10 @@ class addressGenerator(StoppableThread): '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash ): break - logger.info( + self.logger.info( 'Generated address with ripe digest: %s', hexlify(ripe)) try: - logger.info( + self.logger.info( 'Address generator calculated %s addresses at %s' ' addresses per second before finding one with' ' the correct ripe-prefix.', @@ -210,7 +209,7 @@ class addressGenerator(StoppableThread): or command == 'getDeterministicAddress' \ or command == 'createChan' or command == 'joinChan': if len(deterministicPassphrase) == 0: - logger.warning( + self.logger.warning( 'You are creating deterministic' ' address(es) using a blank passphrase.' ' Bitmessage will do it but it is rather stupid.') @@ -263,10 +262,10 @@ class addressGenerator(StoppableThread): ): break - logger.info( + self.logger.info( 'Generated address with ripe digest: %s', hexlify(ripe)) try: - logger.info( + self.logger.info( 'Address generator calculated %s addresses' ' at %s addresses per second before finding' ' one with the correct ripe-prefix.', @@ -316,7 +315,7 @@ class addressGenerator(StoppableThread): addressAlreadyExists = True if addressAlreadyExists: - logger.info( + self.logger.info( '%s already exists. Not adding it again.', address ) @@ -329,7 +328,7 @@ class addressGenerator(StoppableThread): ).arg(address) )) else: - logger.debug('label: %s', label) + self.logger.debug('label: %s', label) BMConfigParser().set(address, 'label', label) BMConfigParser().set(address, 'enabled', 'true') BMConfigParser().set(address, 'decoy', 'false') diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index 49e15f49..fc53a5b0 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -24,16 +24,15 @@ import os import shared import time -import tr -from bmconfigparser import BMConfigParser -from helper_sql import sqlQuery, sqlExecute -from helper_threading import StoppableThread -from inventory import Inventory -from network.connectionpool import BMConnectionPool -from debug import logger import knownnodes import queues import state +import tr +from bmconfigparser import BMConfigParser +from helper_sql import sqlQuery, sqlExecute +from inventory import Inventory +from network.connectionpool import BMConnectionPool +from network.threads import StoppableThread class singleCleaner(StoppableThread): @@ -99,7 +98,7 @@ class singleCleaner(StoppableThread): ) for row in queryreturn: if len(row) < 2: - logger.error( + self.logger.error( 'Something went wrong in the singleCleaner thread:' ' a query did not return the requested fields. %r', row @@ -108,9 +107,9 @@ class singleCleaner(StoppableThread): break toAddress, ackData, status = row if status == 'awaitingpubkey': - resendPubkeyRequest(toAddress) + self.resendPubkeyRequest(toAddress) elif status == 'msgsent': - resendMsg(ackData) + self.resendMsg(ackData) try: # Cleanup knownnodes and handle possible severe exception @@ -118,7 +117,7 @@ class singleCleaner(StoppableThread): knownnodes.cleanupKnownNodes() except Exception as err: if "Errno 28" in str(err): - logger.fatal( + self.logger.fatal( '(while writing knownnodes to disk)' ' Alert: Your disk or data storage volume is full.' 
) @@ -161,41 +160,41 @@ class singleCleaner(StoppableThread): if state.shutdown == 0: self.stop.wait(singleCleaner.cycleLength) + def resendPubkeyRequest(self, address): + """Resend pubkey request for address""" + self.logger.debug( + 'It has been a long time and we haven\'t heard a response to our' + ' getpubkey request. Sending again.' + ) + try: + # We need to take this entry out of the neededPubkeys structure + # because the queues.workerQueue checks to see whether the entry + # is already present and will not do the POW and send the message + # because it assumes that it has already done it recently. + del state.neededPubkeys[address] + except: + pass -def resendPubkeyRequest(address): - logger.debug( - 'It has been a long time and we haven\'t heard a response to our' - ' getpubkey request. Sending again.' - ) - try: - # We need to take this entry out of the neededPubkeys structure - # because the queues.workerQueue checks to see whether the entry - # is already present and will not do the POW and send the message - # because it assumes that it has already done it recently. - del state.neededPubkeys[address] - except: - pass + queues.UISignalQueue.put(( + 'updateStatusBar', + 'Doing work necessary to again attempt to request a public key...' + )) + sqlExecute( + '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''', + address) + queues.workerQueue.put(('sendmessage', '')) - queues.UISignalQueue.put(( - 'updateStatusBar', - 'Doing work necessary to again attempt to request a public key...' - )) - sqlExecute( - '''UPDATE sent SET status='msgqueued' WHERE toaddress=?''', - address) - queues.workerQueue.put(('sendmessage', '')) - - -def resendMsg(ackdata): - logger.debug( - 'It has been a long time and we haven\'t heard an acknowledgement' - ' to our msg. Sending again.' - ) - sqlExecute( - '''UPDATE sent SET status='msgqueued' WHERE ackdata=?''', - ackdata) - queues.workerQueue.put(('sendmessage', '')) - queues.UISignalQueue.put(( - 'updateStatusBar', - 'Doing work necessary to again attempt to deliver a message...' - )) + def resendMsg(self, ackdata): + """Resend message by ackdata""" + self.logger.debug( + 'It has been a long time and we haven\'t heard an acknowledgement' + ' to our msg. Sending again.' + ) + sqlExecute( + '''UPDATE sent SET status='msgqueued' WHERE ackdata=?''', + ackdata) + queues.workerQueue.put(('sendmessage', '')) + queues.UISignalQueue.put(( + 'updateStatusBar', + 'Doing work necessary to again attempt to deliver a message...' 
+ )) diff --git a/src/class_singleWorker.py b/src/class_singleWorker.py index 0798296e..77fa18c0 100644 --- a/src/class_singleWorker.py +++ b/src/class_singleWorker.py @@ -26,10 +26,9 @@ import state import tr from addresses import calculateInventoryHash, decodeAddress, decodeVarint, encodeVarint from bmconfigparser import BMConfigParser -from debug import logger from helper_sql import sqlExecute, sqlQuery -from helper_threading import StoppableThread from inventory import Inventory +from network.threads import StoppableThread def sizeof_fmt(num, suffix='h/s'): @@ -98,7 +97,7 @@ class singleWorker(StoppableThread): '''SELECT ackdata FROM sent WHERE status = 'msgsent' ''') for row in queryreturn: ackdata, = row - logger.info('Watching for ackdata %s', hexlify(ackdata)) + self.logger.info('Watching for ackdata %s', hexlify(ackdata)) shared.ackdataForWhichImWatching[ackdata] = 0 # Fix legacy (headerless) watched ackdata to include header @@ -173,14 +172,14 @@ class singleWorker(StoppableThread): self.busy = 0 return else: - logger.error( + self.logger.error( 'Probable programming error: The command sent' ' to the workerThread is weird. It is: %s\n', command ) queues.workerQueue.task_done() - logger.info("Quitting...") + self.logger.info("Quitting...") def _getKeysForAddress(self, address): privSigningKeyBase58 = BMConfigParser().get( @@ -217,25 +216,24 @@ class singleWorker(StoppableThread): )) / (2 ** 16)) )) initialHash = hashlib.sha512(payload).digest() - logger.info( + self.logger.info( '%s Doing proof of work... TTL set to %s', log_prefix, TTL) if log_time: start_time = time.time() trialValue, nonce = proofofwork.run(target, initialHash) - logger.info( + self.logger.info( '%s Found proof of work %s Nonce: %s', log_prefix, trialValue, nonce ) try: delta = time.time() - start_time - logger.info( + self.logger.info( 'PoW took %.1f seconds, speed %s.', delta, sizeof_fmt(nonce / delta) ) except: # NameError pass payload = pack('>Q', nonce) + payload - # inventoryHash = calculateInventoryHash(payload) return payload def doPOWForMyV2Pubkey(self, adressHash): @@ -260,7 +258,7 @@ class singleWorker(StoppableThread): _, _, pubSigningKey, pubEncryptionKey = \ self._getKeysForAddress(myAddress) except Exception as err: - logger.error( + self.logger.error( 'Error within doPOWForMyV2Pubkey. Could not read' ' the keys from the keys.dat file for a requested' ' address. %s\n', err @@ -278,7 +276,8 @@ class singleWorker(StoppableThread): Inventory()[inventoryHash] = ( objectType, streamNumber, payload, embeddedTime, '') - logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash)) + self.logger.info( + 'broadcasting inv with hash: %s', hexlify(inventoryHash)) queues.invQueue.put((streamNumber, inventoryHash)) queues.UISignalQueue.put(('updateStatusBar', '')) @@ -303,7 +302,7 @@ class singleWorker(StoppableThread): # The address has been deleted. return if BMConfigParser().safeGetBoolean(myAddress, 'chan'): - logger.info('This is a chan address. Not sending pubkey.') + self.logger.info('This is a chan address. Not sending pubkey.') return _, addressVersionNumber, streamNumber, adressHash = decodeAddress( myAddress) @@ -333,7 +332,7 @@ class singleWorker(StoppableThread): privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \ self._getKeysForAddress(myAddress) except Exception as err: - logger.error( + self.logger.error( 'Error within sendOutOrStoreMyV3Pubkey. Could not read' ' the keys from the keys.dat file for a requested' ' address. 
%s\n', err @@ -360,7 +359,8 @@ class singleWorker(StoppableThread): Inventory()[inventoryHash] = ( objectType, streamNumber, payload, embeddedTime, '') - logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash)) + self.logger.info( + 'broadcasting inv with hash: %s', hexlify(inventoryHash)) queues.invQueue.put((streamNumber, inventoryHash)) queues.UISignalQueue.put(('updateStatusBar', '')) @@ -383,7 +383,7 @@ class singleWorker(StoppableThread): # The address has been deleted. return if shared.BMConfigParser().safeGetBoolean(myAddress, 'chan'): - logger.info('This is a chan address. Not sending pubkey.') + self.logger.info('This is a chan address. Not sending pubkey.') return _, addressVersionNumber, streamNumber, addressHash = decodeAddress( myAddress) @@ -402,7 +402,7 @@ class singleWorker(StoppableThread): privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \ self._getKeysForAddress(myAddress) except Exception as err: - logger.error( + self.logger.error( 'Error within sendOutOrStoreMyV4Pubkey. Could not read' ' the keys from the keys.dat file for a requested' ' address. %s\n', err @@ -450,7 +450,8 @@ class singleWorker(StoppableThread): doubleHashOfAddressData[32:] ) - logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash)) + self.logger.info( + 'broadcasting inv with hash: %s', hexlify(inventoryHash)) queues.invQueue.put((streamNumber, inventoryHash)) queues.UISignalQueue.put(('updateStatusBar', '')) @@ -459,7 +460,7 @@ class singleWorker(StoppableThread): myAddress, 'lastpubkeysendtime', str(int(time.time()))) BMConfigParser().save() except Exception as err: - logger.error( + self.logger.error( 'Error: Couldn\'t add the lastpubkeysendtime' ' to the keys.dat file. Error message: %s', err ) @@ -497,7 +498,7 @@ class singleWorker(StoppableThread): objectType, streamNumber, buffer(payload), embeddedTime, buffer(tag) ) - logger.info( + self.logger.info( 'sending inv (within sendOnionPeerObj function) for object: %s', hexlify(inventoryHash)) queues.invQueue.put((streamNumber, inventoryHash)) @@ -520,7 +521,7 @@ class singleWorker(StoppableThread): _, addressVersionNumber, streamNumber, ripe = \ decodeAddress(fromaddress) if addressVersionNumber <= 1: - logger.error( + self.logger.error( 'Error: In the singleWorker thread, the ' ' sendBroadcast function doesn\'t understand' ' the address version.\n') @@ -636,7 +637,7 @@ class singleWorker(StoppableThread): # to not let the user try to send a message this large # until we implement message continuation. if len(payload) > 2 ** 18: # 256 KiB - logger.critical( + self.logger.critical( 'This broadcast object is too large to send.' ' This should never happen. 
Object size: %s', len(payload) @@ -647,7 +648,7 @@ class singleWorker(StoppableThread): objectType = 3 Inventory()[inventoryHash] = ( objectType, streamNumber, payload, embeddedTime, tag) - logger.info( + self.logger.info( 'sending inv (within sendBroadcast function)' ' for object: %s', hexlify(inventoryHash) @@ -867,8 +868,8 @@ class singleWorker(StoppableThread): "MainWindow", "Looking up the receiver\'s public key")) )) - logger.info('Sending a message.') - logger.debug( + self.logger.info('Sending a message.') + self.logger.debug( 'First 150 characters of message: %s', repr(message[:150]) ) @@ -912,7 +913,7 @@ class singleWorker(StoppableThread): if not shared.BMConfigParser().safeGetBoolean( 'bitmessagesettings', 'willinglysendtomobile' ): - logger.info( + self.logger.info( 'The receiver is a mobile user but the' ' sender (you) has not selected that you' ' are willing to send to mobiles. Aborting' @@ -978,7 +979,7 @@ class singleWorker(StoppableThread): defaults.networkDefaultPayloadLengthExtraBytes: requiredPayloadLengthExtraBytes = \ defaults.networkDefaultPayloadLengthExtraBytes - logger.debug( + self.logger.debug( 'Using averageProofOfWorkNonceTrialsPerByte: %s' ' and payloadLengthExtraBytes: %s.', requiredAverageProofOfWorkNonceTrialsPerByte, @@ -1043,8 +1044,9 @@ class singleWorker(StoppableThread): l10n.formatTimestamp())))) continue else: # if we are sending a message to ourselves or a chan.. - logger.info('Sending a message.') - logger.debug('First 150 characters of message: %r', message[:150]) + self.logger.info('Sending a message.') + self.logger.debug( + 'First 150 characters of message: %r', message[:150]) behaviorBitfield = protocol.getBitfield(fromaddress) try: @@ -1063,7 +1065,7 @@ class singleWorker(StoppableThread): " message. %1" ).arg(l10n.formatTimestamp())) )) - logger.error( + self.logger.error( 'Error within sendMsg. Could not read the keys' ' from the keys.dat file for our own address. %s\n', err) @@ -1139,14 +1141,14 @@ class singleWorker(StoppableThread): payload += encodeVarint(encodedMessage.length) payload += encodedMessage.data if BMConfigParser().has_section(toaddress): - logger.info( + self.logger.info( 'Not bothering to include ackdata because we are' ' sending to ourselves or a chan.' ) fullAckPayload = '' elif not protocol.checkBitfield( behaviorBitfield, protocol.BITFIELD_DOESACK): - logger.info( + self.logger.info( 'Not bothering to include ackdata because' ' the receiver said that they won\'t relay it anyway.' ) @@ -1199,7 +1201,7 @@ class singleWorker(StoppableThread): requiredPayloadLengthExtraBytes )) / (2 ** 16)) )) - logger.info( + self.logger.info( '(For msg message) Doing proof of work. Total required' ' difficulty: %f. Required small message difficulty: %f.', float(requiredAverageProofOfWorkNonceTrialsPerByte) / @@ -1211,12 +1213,12 @@ class singleWorker(StoppableThread): powStartTime = time.time() initialHash = hashlib.sha512(encryptedPayload).digest() trialValue, nonce = proofofwork.run(target, initialHash) - logger.info( + self.logger.info( '(For msg message) Found proof of work %s Nonce: %s', trialValue, nonce ) try: - logger.info( + self.logger.info( 'PoW took %.1f seconds, speed %s.', time.time() - powStartTime, sizeof_fmt(nonce / (time.time() - powStartTime)) @@ -1231,7 +1233,7 @@ class singleWorker(StoppableThread): # in the code to not let the user try to send a message # this large until we implement message continuation. 
if len(encryptedPayload) > 2 ** 18: # 256 KiB - logger.critical( + self.logger.critical( 'This msg object is too large to send. This should' ' never happen. Object size: %i', len(encryptedPayload) @@ -1262,7 +1264,7 @@ class singleWorker(StoppableThread): " Sent on %1" ).arg(l10n.formatTimestamp())) )) - logger.info( + self.logger.info( 'Broadcasting inv for my msg(within sendmsg function): %s', hexlify(inventoryHash) ) @@ -1315,7 +1317,7 @@ class singleWorker(StoppableThread): toStatus, addressVersionNumber, streamNumber, ripe = decodeAddress( toAddress) if toStatus != 'success': - logger.error( + self.logger.error( 'Very abnormal error occurred in requestPubKey.' ' toAddress is: %r. Please report this error to Atheros.', toAddress @@ -1329,7 +1331,7 @@ class singleWorker(StoppableThread): toAddress ) if not queryReturn: - logger.critical( + self.logger.critical( 'BUG: Why are we requesting the pubkey for %s' ' if there are no messages in the sent folder' ' to that address?', toAddress @@ -1377,11 +1379,11 @@ class singleWorker(StoppableThread): payload += encodeVarint(streamNumber) if addressVersionNumber <= 3: payload += ripe - logger.info( + self.logger.info( 'making request for pubkey with ripe: %s', hexlify(ripe)) else: payload += tag - logger.info( + self.logger.info( 'making request for v4 pubkey with tag: %s', hexlify(tag)) # print 'trial value', trialValue @@ -1402,7 +1404,7 @@ class singleWorker(StoppableThread): objectType = 1 Inventory()[inventoryHash] = ( objectType, streamNumber, payload, embeddedTime, '') - logger.info('sending inv (for the getpubkey message)') + self.logger.info('sending inv (for the getpubkey message)') queues.invQueue.put((streamNumber, inventoryHash)) # wait 10% past expiration diff --git a/src/class_smtpDeliver.py b/src/class_smtpDeliver.py index fa607220..58cd4631 100644 --- a/src/class_smtpDeliver.py +++ b/src/class_smtpDeliver.py @@ -5,7 +5,6 @@ src/class_smtpDeliver.py # pylint: disable=unused-variable import smtplib -import sys import urlparse from email.header import Header from email.mime.text import MIMEText @@ -13,8 +12,7 @@ from email.mime.text import MIMEText import queues import state from bmconfigparser import BMConfigParser -from debug import logger -from helper_threading import StoppableThread +from network.threads import StoppableThread SMTPDOMAIN = "bmaddr.lan" @@ -75,10 +73,12 @@ class smtpDeliver(StoppableThread): client.starttls() client.ehlo() client.sendmail(msg['From'], [to], msg.as_string()) - logger.info("Delivered via SMTP to %s through %s:%i ...", to, u.hostname, u.port) + self.logger.info( + 'Delivered via SMTP to %s through %s:%i ...', + to, u.hostname, u.port) client.quit() except: - logger.error("smtp delivery error", exc_info=True) + self.logger.error('smtp delivery error', exc_info=True) elif command == 'displayNewSentMessage': toAddress, fromLabel, fromAddress, subject, message, ackdata = data elif command == 'updateNetworkStatusTab': @@ -112,5 +112,5 @@ class smtpDeliver(StoppableThread): elif command == 'stopThread': break else: - sys.stderr.write( - 'Command sent to smtpDeliver not recognized: %s\n' % command) + self.logger.warning( + 'Command sent to smtpDeliver not recognized: %s', command) diff --git a/src/class_smtpServer.py b/src/class_smtpServer.py index 99d9c4b3..924333a6 100644 --- a/src/class_smtpServer.py +++ b/src/class_smtpServer.py @@ -1,26 +1,28 @@ import asyncore import base64 import email -from email.parser import Parser -from email.header import decode_header +import logging import re import signal 
import smtpd import threading import time +from email.header import decode_header +from email.parser import Parser +import queues from addresses import decodeAddress from bmconfigparser import BMConfigParser -from debug import logger -from helper_sql import sqlExecute from helper_ackPayload import genAckPayload -from helper_threading import StoppableThread -import queues +from helper_sql import sqlExecute +from network.threads import StoppableThread from version import softwareVersion SMTPDOMAIN = "bmaddr.lan" LISTENPORT = 8425 +logger = logging.getLogger('default') + class smtpServerChannel(smtpd.SMTPChannel): def smtp_EHLO(self, arg): @@ -39,7 +41,7 @@ class smtpServerChannel(smtpd.SMTPChannel): decoded = base64.b64decode(authstring) correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \ "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "") - logger.debug("authstring: %s / %s", correctauth, decoded) + logger.debug('authstring: %s / %s', correctauth, decoded) if correctauth == decoded: self.auth = True self.push('235 2.7.0 Authentication successful') @@ -50,7 +52,7 @@ class smtpServerChannel(smtpd.SMTPChannel): def smtp_DATA(self, arg): if not hasattr(self, "auth") or not self.auth: - self.push ("530 Authentication required") + self.push('530 Authentication required') return smtpd.SMTPChannel.smtp_DATA(self, arg) @@ -98,17 +100,15 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): return ret - def process_message(self, peer, mailfrom, rcpttos, data): -# print 'Receiving message from:', peer p = re.compile(".*<([^>]+)>") if not hasattr(self.channel, "auth") or not self.channel.auth: - logger.error("Missing or invalid auth") + logger.error('Missing or invalid auth') return try: self.msg_headers = Parser().parsestr(data) except: - logger.error("Invalid headers") + logger.error('Invalid headers') return try: @@ -118,7 +118,7 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): if sender not in BMConfigParser().addresses(): raise Exception("Nonexisting user %s" % sender) except Exception as err: - logger.debug("Bad envelope from %s: %s", mailfrom, repr(err)) + logger.debug('Bad envelope from %s: %r', mailfrom, err) msg_from = self.decode_header("from") try: msg_from = p.sub(r'\1', self.decode_header("from")[0]) @@ -128,7 +128,7 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): if sender not in BMConfigParser().addresses(): raise Exception("Nonexisting user %s" % sender) except Exception as err: - logger.error("Bad headers from %s: %s", msg_from, repr(err)) + logger.error('Bad headers from %s: %r', msg_from, err) return try: @@ -147,11 +147,12 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): rcpt, domain = p.sub(r'\1', to).split("@") if domain != SMTPDOMAIN: raise Exception("Bad domain %s" % domain) - logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject) + logger.debug( + 'Sending %s to %s about %s', sender, rcpt, msg_subject) self.send(sender, rcpt, msg_subject, body) - logger.info("Relayed %s to %s", sender, rcpt) + logger.info('Relayed %s to %s', sender, rcpt) except Exception as err: - logger.error( "Bad to %s: %s", to, repr(err)) + logger.error('Bad to %s: %r', to, err) continue return @@ -169,21 +170,24 @@ class smtpServer(StoppableThread): def run(self): asyncore.loop(1) + def signals(signal, frame): - print "Got signal, terminating" + logger.warning('Got signal, terminating') for thread in threading.enumerate(): if thread.isAlive() and isinstance(thread, StoppableThread): thread.stopThread() + def 
runServer(): - print "Running SMTPd thread" + logger.warning('Running SMTPd thread') smtpThread = smtpServer() smtpThread.start() signal.signal(signal.SIGINT, signals) signal.signal(signal.SIGTERM, signals) - print "Processing" + logger.warning('Processing') smtpThread.join() - print "The end" + logger.warning('The end') + if __name__ == "__main__": runServer() diff --git a/src/debug.py b/src/debug.py index d3730d7f..7d523b3c 100644 --- a/src/debug.py +++ b/src/debug.py @@ -1,26 +1,38 @@ """ Logging and debuging facility -============================= +----------------------------- Levels: - DEBUG - Detailed information, typically of interest only when diagnosing problems. - INFO - Confirmation that things are working as expected. - WARNING - An indication that something unexpected happened, or indicative of some problem in the - near future (e.g. 'disk space low'). The software is still working as expected. - ERROR - Due to a more serious problem, the software has not been able to perform some function. - CRITICAL - A serious error, indicating that the program itself may be unable to continue running. + DEBUG + Detailed information, typically of interest only when diagnosing problems. + INFO + Confirmation that things are working as expected. + WARNING + An indication that something unexpected happened, or indicative of + some problem in the near future (e.g. 'disk space low'). The software + is still working as expected. + ERROR + Due to a more serious problem, the software has not been able to + perform some function. + CRITICAL + A serious error, indicating that the program itself may be unable to + continue running. -There are three loggers: `console_only`, `file_only` and `both`. +There are three loggers by default: `console_only`, `file_only` and `both`. +You can configure logging in the logging.dat in the appdata dir. +It's format is described in the :func:`logging.config.fileConfig` doc. -Use: `from debug import logger` to import this facility into whatever module you wish to log messages from. - Logging is thread-safe so you don't have to worry about locks, just import and log. +Use: +>>> import logging +>>> logger = logging.getLogger('default') + +The old form: ``from debug import logger`` is also may be used, +but only in the top level modules. + +Logging is thread-safe so you don't have to worry about locks, +just import and log. """ import ConfigParser @@ -28,6 +40,7 @@ import logging import logging.config import os import sys + import helper_startup import state @@ -41,10 +54,17 @@ log_level = 'WARNING' def log_uncaught_exceptions(ex_cls, ex, tb): + """The last resort logging function used for sys.excepthook""" logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb)) def configureLogging(): + """ + Configure logging, + using either logging.dat file in the state.appdata dir + or dictionary with hardcoded settings. 
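A logging.dat in the fileConfig format referenced above might look like the following (a hypothetical minimal example, not shipped with this patch; the handler and formatter names are illustrative):

    [loggers]
    keys=root,default

    [handlers]
    keys=console

    [formatters]
    keys=default

    [formatter_default]
    format=%(asctime)s %(levelname)s: %(message)s

    [handler_console]
    class=StreamHandler
    level=NOTSET
    formatter=default
    args=(sys.stderr,)

    [logger_root]
    level=DEBUG
    handlers=console

    [logger_default]
    level=DEBUG
    handlers=console
    qualname=default
    propagate=0
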
+ """ + sys.excepthook = log_uncaught_exceptions fail_msg = '' try: logging_config = os.path.join(state.appdata, 'logging.dat') @@ -63,9 +83,7 @@ def configureLogging(): # no need to confuse the user if the logger config is missing entirely fail_msg = 'Using default logger configuration' - sys.excepthook = log_uncaught_exceptions - - logging.config.dictConfig({ + logging_config = { 'version': 1, 'formatters': { 'default': { @@ -107,34 +125,28 @@ def configureLogging(): 'level': log_level, 'handlers': ['console'], }, - }) + } + + logging_config['loggers']['default'] = logging_config['loggers'][ + 'file_only' if '-c' in sys.argv else 'both'] + logging.config.dictConfig(logging_config) return True, fail_msg -def initLogging(): - preconfigured, msg = configureLogging() - if preconfigured: - if '-c' in sys.argv: - logger = logging.getLogger('file_only') - else: - logger = logging.getLogger('both') - else: - logger = logging.getLogger('default') - - if msg: - logger.log(logging.WARNING if preconfigured else logging.INFO, msg) - return logger - - def resetLogging(): + """Reconfigure logging in runtime when state.appdata dir changed""" global logger - for i in logger.handlers.iterkeys(): + for i in logger.handlers: logger.removeHandler(i) i.flush() i.close() - logger = initLogging() + configureLogging() + logger = logging.getLogger('default') # ! -logger = initLogging() +preconfigured, msg = configureLogging() +logger = logging.getLogger('default') +if msg: + logger.log(logging.WARNING if preconfigured else logging.INFO, msg) diff --git a/src/helper_threading.py b/src/helper_threading.py index 4b0a074e..e4fbe940 100644 --- a/src/helper_threading.py +++ b/src/helper_threading.py @@ -1,9 +1,6 @@ -"""Helper threading perform all the threading operations.""" +"""set_thread_name for threads that don't use StoppableThread""" import threading -from contextlib import contextmanager - -import helper_random try: import prctl @@ -22,37 +19,3 @@ else: threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap threading.Thread._Thread__bootstrap = _thread_name_hack - - -class StoppableThread(threading.Thread): - name = None - - def __init__(self, name=None): - if name: - self.name = name - super(StoppableThread, self).__init__(name=self.name) - self.initStop() - helper_random.seed() - - def initStop(self): - self.stop = threading.Event() - self._stopped = False - - def stopThread(self): - self._stopped = True - self.stop.set() - - -class BusyError(threading.ThreadError): - pass - - -@contextmanager -def nonBlocking(lock): - locked = lock.acquire(False) - if not locked: - raise BusyError - try: - yield - finally: - lock.release() diff --git a/src/messagetypes/__init__.py b/src/messagetypes/__init__.py index 7319dfd5..af6bcdaa 100644 --- a/src/messagetypes/__init__.py +++ b/src/messagetypes/__init__.py @@ -1,17 +1,15 @@ -""" -src/messagetypes/__init__.py -============================ -""" +import logging from importlib import import_module from os import path, listdir from string import lower -from debug import logger import messagetypes import paths +logger = logging.getLogger('default') -class MsgBase(object): # pylint: disable=too-few-public-methods + +class MsgBase(object): # pylint: disable=too-few-public-methods """Base class for message types""" def __init__(self): self.data = {"": lower(type(self).__name__)} diff --git a/src/messagetypes/message.py b/src/messagetypes/message.py index cd5bf762..573732d4 100644 --- a/src/messagetypes/message.py +++ b/src/messagetypes/message.py @@ -1,10 
+1,9 @@ -""" -src/messagetypes/message.py -=========================== -""" -from debug import logger +import logging + from messagetypes import MsgBase +logger = logging.getLogger('default') + class Message(MsgBase): """Encapsulate a message""" diff --git a/src/messagetypes/vote.py b/src/messagetypes/vote.py index e128e9ba..b559c256 100644 --- a/src/messagetypes/vote.py +++ b/src/messagetypes/vote.py @@ -1,10 +1,9 @@ -""" -src/messagetypes/vote.py -======================== -""" -from debug import logger +import logging + from messagetypes import MsgBase +logger = logging.getLogger('default') + class Vote(MsgBase): """Module used to vote""" diff --git a/src/network/addrthread.py b/src/network/addrthread.py index 9f516e80..d5d21599 100644 --- a/src/network/addrthread.py +++ b/src/network/addrthread.py @@ -1,9 +1,9 @@ import Queue -from helper_threading import StoppableThread +import state from network.connectionpool import BMConnectionPool from queues import addrQueue -import state +from threads import StoppableThread class AddrThread(StoppableThread): diff --git a/src/network/advanceddispatcher.py b/src/network/advanceddispatcher.py index c8f125f0..eeb50bdf 100644 --- a/src/network/advanceddispatcher.py +++ b/src/network/advanceddispatcher.py @@ -10,8 +10,7 @@ import time import network.asyncore_pollchoose as asyncore import state -from debug import logger -from helper_threading import BusyError, nonBlocking +from threads import BusyError, nonBlocking class ProcessingError(Exception): @@ -84,7 +83,8 @@ class AdvancedDispatcher(asyncore.dispatcher): try: cmd = getattr(self, "state_" + str(self.state)) except AttributeError: - logger.error("Unknown state %s", self.state, exc_info=True) + self.logger.error( + 'Unknown state %s', self.state, exc_info=True) raise UnknownStateError(self.state) if not cmd(): break diff --git a/src/network/announcethread.py b/src/network/announcethread.py index 59fad128..5cd27ede 100644 --- a/src/network/announcethread.py +++ b/src/network/announcethread.py @@ -2,22 +2,20 @@ src/network/announcethread.py ================================= """ + import time +import state from bmconfigparser import BMConfigParser -from debug import logger -from helper_threading import StoppableThread from network.bmproto import BMProto from network.connectionpool import BMConnectionPool from network.udp import UDPSocket -import state +from threads import StoppableThread class AnnounceThread(StoppableThread): """A thread to manage regular announcing of this node""" - def __init__(self): - super(AnnounceThread, self).__init__(name="Announcer") - logger.info("init announce thread") + name = "Announcer" def run(self): lastSelfAnnounced = 0 diff --git a/src/network/bmobject.py b/src/network/bmobject.py index e19eaac9..ac6429e4 100644 --- a/src/network/bmobject.py +++ b/src/network/bmobject.py @@ -2,15 +2,17 @@ BMObject and it's exceptions. 
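The advanceddispatcher hunk above shows the convention the whole network package relies on: the current state name selects a state_* method via getattr. A condensed sketch of that dispatch loop, under the same names:

    class UnknownStateError(Exception):
        """Raised when no state_* handler exists for the current state"""

    def process(dispatcher):
        """Run state handlers until one reports it cannot proceed"""
        while True:
            try:
                cmd = getattr(dispatcher, 'state_' + str(dispatcher.state))
            except AttributeError:
                dispatcher.logger.error(
                    'Unknown state %s', dispatcher.state, exc_info=True)
                raise UnknownStateError(dispatcher.state)
            if not cmd():  # False: not enough data, or connection aborted
                break
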
""" +import logging import time import protocol import state from addresses import calculateInventoryHash -from debug import logger from inventory import Inventory from network.dandelion import Dandelion +logger = logging.getLogger('default') + class BMObjectInsufficientPOWError(Exception): """Exception indicating the object doesn't have sufficient proof of work.""" diff --git a/src/network/bmproto.py b/src/network/bmproto.py index 0a2cdc7e..839630d8 100644 --- a/src/network/bmproto.py +++ b/src/network/bmproto.py @@ -5,6 +5,7 @@ src/network/bmproto.py # pylint: disable=attribute-defined-outside-init import base64 import hashlib +import logging import socket import struct import time @@ -16,7 +17,6 @@ import knownnodes import protocol import state from bmconfigparser import BMConfigParser -from debug import logger from inventory import Inventory from network.advanceddispatcher import AdvancedDispatcher from network.dandelion import Dandelion @@ -30,6 +30,8 @@ from objectracker import missingObjects, ObjectTracker from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue from randomtrackingdict import RandomTrackingDict +logger = logging.getLogger('default') + class BMProtoError(ProxyError): """A Bitmessage Protocol Base Error""" diff --git a/src/network/connectionchooser.py b/src/network/connectionchooser.py index 53ce30b7..ead8b31b 100644 --- a/src/network/connectionchooser.py +++ b/src/network/connectionchooser.py @@ -1,13 +1,15 @@ # pylint: disable=too-many-branches +import logging import random # nosec import knownnodes import protocol import state from bmconfigparser import BMConfigParser -from debug import logger from queues import Queue, portCheckerQueue +logger = logging.getLogger('default') + def getDiscoveredPeer(): try: diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py index 4d16df49..1267522a 100644 --- a/src/network/connectionpool.py +++ b/src/network/connectionpool.py @@ -3,6 +3,7 @@ src/network/connectionpool.py ================================== """ import errno +import logging import re import socket import time @@ -14,7 +15,6 @@ import protocol import state from bmconfigparser import BMConfigParser from connectionchooser import chooseConnection -from debug import logger from proxy import Proxy from singleton import Singleton from tcp import ( @@ -22,6 +22,8 @@ from tcp import ( TCPConnection, TCPServer) from udp import UDPSocket +logger = logging.getLogger('default') + @Singleton # pylint: disable=too-many-instance-attributes diff --git a/src/network/dandelion.py b/src/network/dandelion.py index fa9081cb..eed3c6ff 100644 --- a/src/network/dandelion.py +++ b/src/network/dandelion.py @@ -2,6 +2,7 @@ src/network/dandelion.py ======================== """ +import logging from collections import namedtuple from random import choice, sample, expovariate from threading import RLock @@ -9,7 +10,6 @@ from time import time import connectionpool import state -from debug import logging from queues import invQueue from singleton import Singleton @@ -24,6 +24,8 @@ MAX_STEMS = 2 Stem = namedtuple('Stem', ['child', 'stream', 'timeout']) +logger = logging.getLogger('default') + @Singleton class Dandelion(): # pylint: disable=old-style-class @@ -72,7 +74,7 @@ class Dandelion(): # pylint: disable=old-style-class def removeHash(self, hashId, reason="no reason specified"): """Switch inventory vector from stem to fluff mode""" - logging.debug( + logger.debug( "%s entering fluff mode due to %s.", ''.join('%02x' % ord(i) for i in hashId), reason) with 
self.lock: diff --git a/src/network/downloadthread.py b/src/network/downloadthread.py index a4b58862..472b32c0 100644 --- a/src/network/downloadthread.py +++ b/src/network/downloadthread.py @@ -2,17 +2,17 @@ src/network/downloadthread.py ============================= """ + import time import addresses import helper_random import protocol from dandelion import Dandelion -from debug import logger -from helper_threading import StoppableThread from inventory import Inventory from network.connectionpool import BMConnectionPool from objectracker import missingObjects +from threads import StoppableThread class DownloadThread(StoppableThread): @@ -25,7 +25,6 @@ class DownloadThread(StoppableThread): def __init__(self): super(DownloadThread, self).__init__(name="Downloader") - logger.info("init download thread") self.lastCleaned = time.time() def cleanPending(self): @@ -78,7 +77,9 @@ class DownloadThread(StoppableThread): continue payload[0:0] = addresses.encodeVarint(chunkCount) i.append_write_buf(protocol.CreatePacket('getdata', payload)) - logger.debug("%s:%i Requesting %i objects", i.destination.host, i.destination.port, chunkCount) + self.logger.debug( + '%s:%i Requesting %i objects', + i.destination.host, i.destination.port, chunkCount) requested += chunkCount if time.time() >= self.lastCleaned + DownloadThread.cleanInterval: self.cleanPending() diff --git a/src/network/invthread.py b/src/network/invthread.py index ad3a0764..bffa6ecb 100644 --- a/src/network/invthread.py +++ b/src/network/invthread.py @@ -9,10 +9,10 @@ from time import time import addresses import protocol import state -from helper_threading import StoppableThread from network.connectionpool import BMConnectionPool from network.dandelion import Dandelion from queues import invQueue +from threads import StoppableThread def handleExpiredDandelion(expired): diff --git a/src/network/networkthread.py b/src/network/networkthread.py index 2a22367f..ba560906 100644 --- a/src/network/networkthread.py +++ b/src/network/networkthread.py @@ -1,20 +1,13 @@ -""" -src/network/networkthread.py -============================ -""" import network.asyncore_pollchoose as asyncore import state -from debug import logger -from helper_threading import StoppableThread from network.connectionpool import BMConnectionPool from queues import excQueue +from threads import StoppableThread class BMNetworkThread(StoppableThread): """A thread to handle network concerns""" - def __init__(self): - super(BMNetworkThread, self).__init__(name="Asyncore") - logger.info("init asyncore thread") + name = "Asyncore" def run(self): try: diff --git a/src/network/proxy.py b/src/network/proxy.py index 479663d3..e65ac6a7 100644 --- a/src/network/proxy.py +++ b/src/network/proxy.py @@ -3,6 +3,7 @@ src/network/proxy.py ==================== """ # pylint: disable=protected-access +import logging import socket import time @@ -10,7 +11,8 @@ import asyncore_pollchoose as asyncore import state from advanceddispatcher import AdvancedDispatcher from bmconfigparser import BMConfigParser -from debug import logger + +logger = logging.getLogger('default') class ProxyError(Exception): @@ -144,5 +146,6 @@ class Proxy(AdvancedDispatcher): def state_proxy_handshake_done(self): """Handshake is complete at this point""" - self.connectedAt = time.time() # pylint: disable=attribute-defined-outside-init + # pylint: disable=attribute-defined-outside-init + self.connectedAt = time.time() return False diff --git a/src/network/receivequeuethread.py b/src/network/receivequeuethread.py index 
5d8cbd37..13c12ce2 100644 --- a/src/network/receivequeuethread.py +++ b/src/network/receivequeuethread.py @@ -2,18 +2,16 @@ import errno import Queue import socket -from debug import logger -from helper_threading import StoppableThread +import state from network.connectionpool import BMConnectionPool from network.advanceddispatcher import UnknownStateError from queues import receiveDataQueue -import state +from threads import StoppableThread class ReceiveQueueThread(StoppableThread): def __init__(self, num=0): super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num) - logger.info("init receive queue thread %i", num) def run(self): while not self._stopped and state.shutdown == 0: @@ -26,11 +24,12 @@ class ReceiveQueueThread(StoppableThread): break # cycle as long as there is data - # methods should return False if there isn't enough data, or the connection is to be aborted - - # state_* methods should return False if there isn't enough data, + # methods should return False if there isn't enough data, # or the connection is to be aborted + # state_* methods should return False if there isn't + # enough data, or the connection is to be aborted + try: connection = BMConnectionPool().getConnectionByAddr(dest) # KeyError = connection object not found @@ -40,13 +39,13 @@ class ReceiveQueueThread(StoppableThread): try: connection.process() # UnknownStateError = state isn't implemented - except (UnknownStateError): + except UnknownStateError: pass except socket.error as err: if err.errno == errno.EBADF: connection.set_state("close", 0) else: - logger.error("Socket error: %s", str(err)) + self.logger.error('Socket error: %s', err) except: - logger.error("Error processing", exc_info=True) + self.logger.error('Error processing', exc_info=True) receiveDataQueue.task_done() diff --git a/src/network/tcp.py b/src/network/tcp.py index da02df2f..368ca5e0 100644 --- a/src/network/tcp.py +++ b/src/network/tcp.py @@ -4,6 +4,7 @@ src/network/tcp.py ================== """ +import logging import math import random import socket @@ -18,7 +19,6 @@ import protocol import shared import state from bmconfigparser import BMConfigParser -from debug import logger from helper_random import randomBytes from inventory import Inventory from network.advanceddispatcher import AdvancedDispatcher @@ -30,6 +30,8 @@ from network.socks5 import Socks5Connection from network.tls import TLSDispatcher from queues import UISignalQueue, invQueue, receiveDataQueue +logger = logging.getLogger('default') + class TCPConnection(BMProto, TLSDispatcher): # pylint: disable=too-many-instance-attributes diff --git a/src/network/threads.py b/src/network/threads.py new file mode 100644 index 00000000..9bdaa85d --- /dev/null +++ b/src/network/threads.py @@ -0,0 +1,49 @@ +"""Threading primitives for the network package""" + +import logging +import random +import threading +from contextlib import contextmanager + + +class StoppableThread(threading.Thread): + """Base class for application threads with stopThread method""" + name = None + logger = logging.getLogger('default') + + def __init__(self, name=None): + if name: + self.name = name + super(StoppableThread, self).__init__(name=self.name) + self.stop = threading.Event() + self._stopped = False + random.seed() + self.logger.info('Init thread %s', self.name) + + def stopThread(self): + """Stop the thread""" + self._stopped = True + self.stop.set() + + +class BusyError(threading.ThreadError): + """ + Thread error raised when another connection holds the lock + we are trying to acquire. 
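StoppableThread above gives every subclass a shared 'default' logger, a stop event and a stopThread() method. A typical consumer (ExampleThread is hypothetical, not a thread from this series) looks like:

    from network.threads import StoppableThread

    class ExampleThread(StoppableThread):
        name = "Example"  # class attribute instead of an __init__ override

        def run(self):
            while not self._stopped:
                # ... perform one unit of work here ...
                self.stop.wait(1)  # sleep, but wake early on stopThread()
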
+ """ + pass + + +@contextmanager +def nonBlocking(lock): + """ + A context manager which acquires given lock non-blocking + and raises BusyError if failed to acquire. + """ + locked = lock.acquire(False) + if not locked: + raise BusyError + try: + yield + finally: + lock.release() diff --git a/src/network/tls.py b/src/network/tls.py index 17b1ee1f..52f17c29 100644 --- a/src/network/tls.py +++ b/src/network/tls.py @@ -2,17 +2,18 @@ SSL/TLS negotiation. """ +import logging import os import socket import ssl import sys -from debug import logger from network.advanceddispatcher import AdvancedDispatcher import network.asyncore_pollchoose as asyncore from queues import receiveDataQueue import paths +logger = logging.getLogger('default') _DISCONNECTED_SSL = frozenset((ssl.SSL_ERROR_EOF,)) diff --git a/src/network/udp.py b/src/network/udp.py index 01dc1f7b..97c6aee5 100644 --- a/src/network/udp.py +++ b/src/network/udp.py @@ -2,24 +2,27 @@ src/network/udp.py ================== """ +import logging import time import socket import state import protocol from bmproto import BMProto -from debug import logger from objectracker import ObjectTracker from queues import receiveDataQueue +logger = logging.getLogger('default') -class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes + +class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes """Bitmessage protocol over UDP (class)""" port = 8444 announceInterval = 60 def __init__(self, host=None, sock=None, announcing=False): - super(BMProto, self).__init__(sock=sock) # pylint: disable=bad-super-call + # pylint: disable=bad-super-call + super(BMProto, self).__init__(sock=sock) self.verackReceived = True self.verackSent = True # .. todo:: sort out streams @@ -79,7 +82,8 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attribut decodedIP = protocol.checkIPAddress(str(ip)) if stream not in state.streamsInWhichIAmParticipating: continue - if (seenTime < time.time() - self.maxTimeOffset or seenTime > time.time() + self.maxTimeOffset): + if (seenTime < time.time() - self.maxTimeOffset + or seenTime > time.time() + self.maxTimeOffset): continue if decodedIP is False: # if the address isn't local, interpret it as diff --git a/src/network/uploadthread.py b/src/network/uploadthread.py index 9b29ef0a..1b57bd9a 100644 --- a/src/network/uploadthread.py +++ b/src/network/uploadthread.py @@ -1,26 +1,23 @@ """ src/network/uploadthread.py """ -# pylint: disable=unsubscriptable-object import time import helper_random import protocol -from debug import logger -from helper_threading import StoppableThread from inventory import Inventory from network.connectionpool import BMConnectionPool from network.dandelion import Dandelion from randomtrackingdict import RandomTrackingDict +from threads import StoppableThread class UploadThread(StoppableThread): - """This is a thread that uploads the objects that the peers requested from me """ + """ + This is a thread that uploads the objects that the peers requested from me + """ maxBufSize = 2097152 # 2MB - - def __init__(self): - super(UploadThread, self).__init__(name="Uploader") - logger.info("init upload thread") + name = "Uploader" def run(self): while not self._stopped: @@ -47,22 +44,26 @@ class UploadThread(StoppableThread): if Dandelion().hasHash(chunk) and \ i != Dandelion().objectChildStem(chunk): i.antiIntersectionDelay() - logger.info('%s asked for a stem object we didn\'t offer to it.', - i.destination) + self.logger.info( + '%s asked for a stem object we didn\'t offer to it.', 
+ i.destination) break try: - payload.extend(protocol.CreatePacket('object', - Inventory()[chunk].payload)) + payload.extend(protocol.CreatePacket( + 'object', Inventory()[chunk].payload)) chunk_count += 1 except KeyError: i.antiIntersectionDelay() - logger.info('%s asked for an object we don\'t have.', i.destination) + self.logger.info( + '%s asked for an object we don\'t have.', + i.destination) break if not chunk_count: continue i.append_write_buf(payload) - logger.debug("%s:%i Uploading %i objects", - i.destination.host, i.destination.port, chunk_count) + self.logger.debug( + '%s:%i Uploading %i objects', + i.destination.host, i.destination.port, chunk_count) uploaded += chunk_count if not uploaded: self.stop.wait(1) diff --git a/src/shutdown.py b/src/shutdown.py index f136ac75..85d11d67 100644 --- a/src/shutdown.py +++ b/src/shutdown.py @@ -3,15 +3,15 @@ import Queue import threading import time -from debug import logger -from helper_sql import sqlQuery, sqlStoredProcedure -from helper_threading import StoppableThread -from knownnodes import saveKnownNodes -from inventory import Inventory -from queues import ( - addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue) import shared import state +from debug import logger +from helper_sql import sqlQuery, sqlStoredProcedure +from inventory import Inventory +from knownnodes import saveKnownNodes +from network.threads import StoppableThread +from queues import ( + addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue) def doCleanShutdown(): diff --git a/src/upnp.py b/src/upnp.py index fdc4bc1d..b1ee2e7b 100644 --- a/src/upnp.py +++ b/src/upnp.py @@ -21,8 +21,8 @@ import state import tr from bmconfigparser import BMConfigParser from debug import logger -from helper_threading import StoppableThread from network.connectionpool import BMConnectionPool +from network.threads import StoppableThread def createRequestXML(service, action, arguments=None): From d2a896697d83e1f99bdd649af8771fb73c54393d Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 7 Aug 2019 18:31:08 +0300 Subject: [PATCH 16/70] Used logger.isEnabledFor() to prevent unneeded calculations --- src/class_objectProcessor.py | 57 ++++++++++++++++++++---------------- src/network/bmproto.py | 2 +- src/network/dandelion.py | 7 +++-- src/network/tls.py | 41 ++++++++++++++++---------- 4 files changed, 62 insertions(+), 45 deletions(-) diff --git a/src/class_objectProcessor.py b/src/class_objectProcessor.py index 1a9c0d81..6ae46658 100644 --- a/src/class_objectProcessor.py +++ b/src/class_objectProcessor.py @@ -1,4 +1,5 @@ import hashlib +import logging import random import shared import threading @@ -24,10 +25,11 @@ import protocol import queues import state import tr -from debug import logger from fallback import RIPEMD160Hash import l10n +logger = logging.getLogger('default') + class objectProcessor(threading.Thread): """ @@ -316,13 +318,14 @@ class objectProcessor(threading.Thread): '\x04' + publicSigningKey + '\x04' + publicEncryptionKey) ripe = RIPEMD160Hash(sha.digest()).digest() - logger.debug( - 'within recpubkey, addressVersion: %s, streamNumber: %s' - '\nripe %s\npublicSigningKey in hex: %s' - '\npublicEncryptionKey in hex: %s', - addressVersion, streamNumber, hexlify(ripe), - hexlify(publicSigningKey), hexlify(publicEncryptionKey) - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'within recpubkey, addressVersion: %s, streamNumber: %s' + '\nripe %s\npublicSigningKey in hex: %s' + '\npublicEncryptionKey 
in hex: %s', + addressVersion, streamNumber, hexlify(ripe), + hexlify(publicSigningKey), hexlify(publicEncryptionKey) + ) address = encodeAddress(addressVersion, streamNumber, ripe) @@ -380,13 +383,14 @@ class objectProcessor(threading.Thread): sha.update(publicSigningKey + publicEncryptionKey) ripe = RIPEMD160Hash(sha.digest()).digest() - logger.debug( - 'within recpubkey, addressVersion: %s, streamNumber: %s' - '\nripe %s\npublicSigningKey in hex: %s' - '\npublicEncryptionKey in hex: %s', - addressVersion, streamNumber, hexlify(ripe), - hexlify(publicSigningKey), hexlify(publicEncryptionKey) - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'within recpubkey, addressVersion: %s, streamNumber: %s' + '\nripe %s\npublicSigningKey in hex: %s' + '\npublicEncryptionKey in hex: %s', + addressVersion, streamNumber, hexlify(ripe), + hexlify(publicSigningKey), hexlify(publicEncryptionKey) + ) address = encodeAddress(addressVersion, streamNumber, ripe) queryreturn = sqlQuery( @@ -579,17 +583,18 @@ class objectProcessor(threading.Thread): logger.debug('ECDSA verify failed') return logger.debug('ECDSA verify passed') - logger.debug( - 'As a matter of intellectual curiosity, here is the Bitcoin' - ' address associated with the keys owned by the other person:' - ' %s ..and here is the testnet address: %s. The other person' - ' must take their private signing key from Bitmessage and' - ' import it into Bitcoin (or a service like Blockchain.info)' - ' for it to be of any use. Do not use this unless you know' - ' what you are doing.', - helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey), - helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey) - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'As a matter of intellectual curiosity, here is the Bitcoin' + ' address associated with the keys owned by the other person:' + ' %s ..and here is the testnet address: %s. The other person' + ' must take their private signing key from Bitmessage and' + ' import it into Bitcoin (or a service like Blockchain.info)' + ' for it to be of any use. 
Do not use this unless you know' + ' what you are doing.', + helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey), + helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey) + ) # Used to detect and ignore duplicate messages in our inbox sigHash = hashlib.sha512( hashlib.sha512(signature).digest()).digest()[32:] diff --git a/src/network/bmproto.py b/src/network/bmproto.py index 839630d8..6375f393 100644 --- a/src/network/bmproto.py +++ b/src/network/bmproto.py @@ -510,7 +510,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker): self.timeOffset = self.timestamp - int(time.time()) logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion) logger.debug('services: 0x%08X', self.services) - logger.debug('time offset: %i', self.timestamp - int(time.time())) + logger.debug('time offset: %i', self.timeOffset) logger.debug('my external IP: %s', self.sockNode.host) logger.debug( 'remote node incoming address: %s:%i', diff --git a/src/network/dandelion.py b/src/network/dandelion.py index eed3c6ff..0f68cc24 100644 --- a/src/network/dandelion.py +++ b/src/network/dandelion.py @@ -74,9 +74,10 @@ class Dandelion(): # pylint: disable=old-style-class def removeHash(self, hashId, reason="no reason specified"): """Switch inventory vector from stem to fluff mode""" - logger.debug( - "%s entering fluff mode due to %s.", - ''.join('%02x' % ord(i) for i in hashId), reason) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '%s entering fluff mode due to %s.', + ''.join('%02x' % ord(i) for i in hashId), reason) with self.lock: try: del self.hashMap[hashId] diff --git a/src/network/tls.py b/src/network/tls.py index 52f17c29..d5c4e23a 100644 --- a/src/network/tls.py +++ b/src/network/tls.py @@ -39,12 +39,13 @@ else: sslProtocolCiphers = "AECDH-AES256-SHA" -class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instance-attributes +class TLSDispatcher(AdvancedDispatcher): """TLS functionality for classes derived from AdvancedDispatcher""" - # pylint: disable=too-many-arguments, super-init-not-called, unused-argument + # pylint: disable=too-many-instance-attributes + # pylint: disable=too-many-arguments,super-init-not-called,unused-argument def __init__( - self, address=None, sock=None, certfile=None, keyfile=None, - server_side=False, ciphers=sslProtocolCiphers + self, address=None, sock=None, certfile=None, keyfile=None, + server_side=False, ciphers=sslProtocolCiphers ): self.want_read = self.want_write = True if certfile is None: @@ -96,7 +97,10 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc @staticmethod def state_tls_handshake(): - """Do nothing while TLS handshake is pending, as during this phase we need to react to callbacks instead""" + """ + Do nothing while TLS handshake is pending, as during this phase + we need to react to callbacks instead + """ return False def writable(self): @@ -122,10 +126,11 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc except AttributeError: return AdvancedDispatcher.readable(self) - def handle_read(self): # pylint: disable=inconsistent-return-statements + def handle_read(self): # pylint: disable=inconsistent-return-statements """ - Handle reads for sockets during TLS handshake. Requires special treatment as during the handshake, buffers must - remain empty and normal reads must be ignored + Handle reads for sockets during TLS handshake. Requires special + treatment as during the handshake, buffers must remain empty + and normal reads must be ignored. 
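This is the guard PATCH 16 applies wherever the log arguments themselves are expensive to build (hexlifying keys, joining a hash byte by byte): test the level before doing the work. A minimal sketch assuming the shared 'default' logger:

    import logging
    from binascii import hexlify

    logger = logging.getLogger('default')

    def log_pubkey(ripe, publicSigningKey):
        # isEnabledFor() is a cheap lookup; hexlify() on long strings is not
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(
                'ripe %s, publicSigningKey in hex: %s',
                hexlify(ripe), hexlify(publicSigningKey))
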
""" try: # wait for write buffer flush @@ -147,10 +152,11 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc self.handle_close() return - def handle_write(self): # pylint: disable=inconsistent-return-statements + def handle_write(self): # pylint: disable=inconsistent-return-statements """ - Handle writes for sockets during TLS handshake. Requires special treatment as during the handshake, buffers - must remain empty and normal writes must be ignored + Handle writes for sockets during TLS handshake. Requires special + treatment as during the handshake, buffers must remain empty + and normal writes must be ignored. """ try: # wait for write buffer flush @@ -193,18 +199,23 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc if not (self.want_write or self.want_read): raise except socket.error as err: - if err.errno in asyncore._DISCONNECTED: # pylint: disable=protected-access + # pylint: disable=protected-access + if err.errno in asyncore._DISCONNECTED: self.handle_close() else: raise else: if sys.version_info >= (2, 7, 9): self.tlsVersion = self.sslSocket.version() - logger.debug("%s:%i: TLS handshake success, TLS protocol version: %s", - self.destination.host, self.destination.port, self.sslSocket.version()) + logger.debug( + '%s:%i: TLS handshake success, TLS protocol version: %s', + self.destination.host, self.destination.port, + self.tlsVersion) else: self.tlsVersion = "TLSv1" - logger.debug("%s:%i: TLS handshake success", self.destination.host, self.destination.port) + logger.debug( + '%s:%i: TLS handshake success', + self.destination.host, self.destination.port) # The handshake has completed, so remove this channel and... self.del_channel() self.set_socket(self.sslSocket) From bbdbca253b69103f2219e7013260e100775df97f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Wed, 14 Aug 2019 18:34:34 +0300 Subject: [PATCH 17/70] Added warnings about changing port settings in api and network.tcp --- src/api.py | 5 ++++- src/network/tcp.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/api.py b/src/api.py index b7f5c62d..f9d0a518 100644 --- a/src/api.py +++ b/src/api.py @@ -95,6 +95,8 @@ class singleAPI(StoppableThread): for attempt in range(50): try: if attempt > 0: + logger.warning( + 'Failed to start API listener on port %s', port) port = random.randint(32767, 65535) se = StoppableXMLRPCServer( (BMConfigParser().get( @@ -106,8 +108,9 @@ class singleAPI(StoppableThread): continue else: if attempt > 0: + logger.warning('Setting apiport to %s', port) BMConfigParser().set( - "bitmessagesettings", "apiport", str(port)) + 'bitmessagesettings', 'apiport', str(port)) BMConfigParser().save() break se.register_introspection_functions() diff --git a/src/network/tcp.py b/src/network/tcp.py index 368ca5e0..a1691ceb 100644 --- a/src/network/tcp.py +++ b/src/network/tcp.py @@ -371,6 +371,7 @@ class TCPServer(AdvancedDispatcher): for attempt in range(50): try: if attempt > 0: + logger.warning('Failed to bind on port %s', port) port = random.randint(32767, 65535) self.bind((host, port)) except socket.error as e: @@ -378,6 +379,7 @@ class TCPServer(AdvancedDispatcher): continue else: if attempt > 0: + logger.warning('Setting port to %s', port) BMConfigParser().set( 'bitmessagesettings', 'port', str(port)) BMConfigParser().save() From a48b51721d386eafbc4a07d69990997d70727112 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Fri, 30 Aug 2019 14:56:46 +0300 Subject: [PATCH 18/70] Test new 
logging approach, both debug.logger and resetLogging --- src/debug.py | 6 ++-- src/tests/test_logger.py | 69 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 src/tests/test_logger.py diff --git a/src/debug.py b/src/debug.py index 7d523b3c..472b0d02 100644 --- a/src/debug.py +++ b/src/debug.py @@ -68,7 +68,8 @@ def configureLogging(): fail_msg = '' try: logging_config = os.path.join(state.appdata, 'logging.dat') - logging.config.fileConfig(logging_config) + logging.config.fileConfig( + logging_config, disable_existing_loggers=False) return ( False, 'Loaded logger configuration from %s' % logging_config @@ -80,7 +81,8 @@ def configureLogging(): ' logging config\n%s' % \ (logging_config, sys.exc_info()) else: - # no need to confuse the user if the logger config is missing entirely + # no need to confuse the user if the logger config + # is missing entirely fail_msg = 'Using default logger configuration' logging_config = { diff --git a/src/tests/test_logger.py b/src/tests/test_logger.py new file mode 100644 index 00000000..57448911 --- /dev/null +++ b/src/tests/test_logger.py @@ -0,0 +1,69 @@ +""" +Testing the logger configuration +""" + +import logging +import os +import tempfile +import unittest + + +class TestLogger(unittest.TestCase): + """A test case for bmconfigparser""" + + conf_template = ''' +[loggers] +keys=root + +[handlers] +keys=default + +[formatters] +keys=default + +[formatter_default] +format=%(asctime)s {1} %(message)s + +[handler_default] +class=FileHandler +level=NOTSET +formatter=default +args=('{0}', 'w') + +[logger_root] +level=DEBUG +handlers=default +''' + + def test_fileConfig(self): + """Put logging.dat with special pattern and check it was used""" + tmp = os.environ['BITMESSAGE_HOME'] = tempfile.gettempdir() + log_config = os.path.join(tmp, 'logging.dat') + log_file = os.path.join(tmp, 'debug.log') + + def gen_log_config(pattern): + """A small closure to generate logging.dat with custom pattern""" + with open(log_config, 'wb') as dst: + dst.write(self.conf_template.format(log_file, pattern)) + + pattern = r' o_0 ' + gen_log_config(pattern) + + try: + from pybitmessage.debug import logger, resetLogging + if not os.path.isfile(log_file): # second pass + pattern = r' <===> ' + gen_log_config(pattern) + resetLogging() + except ImportError: + self.fail('There is no package pybitmessage. 
Things gone wrong.') + finally: + os.remove(log_config) + + logger_ = logging.getLogger('default') + + self.assertEqual(logger, logger_) + + logger_.info('Testing the logger...') + + self.assertRegexpMatches(open(log_file).read(), pattern) From f0b4e4ded4592bb2c5f5b60a8704438ba07d2ca1 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Thu, 17 Oct 2019 23:37:56 +0300 Subject: [PATCH 19/70] Replaced logging.getLogger() in other possible places --- src/l10n.py | 3 +-- src/plugins/proxyconfig_stem.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/l10n.py b/src/l10n.py index b3b16341..d18f4199 100644 --- a/src/l10n.py +++ b/src/l10n.py @@ -6,8 +6,7 @@ import time from bmconfigparser import BMConfigParser -#logger = logging.getLogger(__name__) -logger = logging.getLogger('file_only') +logger = logging.getLogger('default') DEFAULT_ENCODING = 'ISO8859-1' diff --git a/src/plugins/proxyconfig_stem.py b/src/plugins/proxyconfig_stem.py index 5bb9a726..bdbfe8ca 100644 --- a/src/plugins/proxyconfig_stem.py +++ b/src/plugins/proxyconfig_stem.py @@ -18,7 +18,7 @@ class DebugLogger(object): """Safe logger wrapper for tor and plugin's logs""" # pylint: disable=too-few-public-methods def __init__(self): - self._logger = logging.getLogger(__name__.split('.', 1)[0]) + self._logger = logging.getLogger('default') self._levels = { 'err': 40, 'warn': 30, From b42f536d23ab617ac56fc71f9a7743333ad499ef Mon Sep 17 00:00:00 2001 From: George McCandless <5fk7echy8@riseup.net> Date: Tue, 8 Oct 2019 20:08:42 +0000 Subject: [PATCH 20/70] Add a checkbox to the network settings tab that allows restricting outbound connections to onion services (i.e., hosts that end with '.onion'). --- src/bitmessageqt/settings.py | 10 ++++++++++ src/bitmessageqt/settings.ui | 7 +++++++ src/network/connectionchooser.py | 5 +++++ 3 files changed, 22 insertions(+) diff --git a/src/bitmessageqt/settings.py b/src/bitmessageqt/settings.py index 513f285b..982328cc 100644 --- a/src/bitmessageqt/settings.py +++ b/src/bitmessageqt/settings.py @@ -99,6 +99,8 @@ class SettingsDialog(QtGui.QDialog): config.getboolean('bitmessagesettings', 'socksauthentication')) self.checkBoxSocksListen.setChecked( config.getboolean('bitmessagesettings', 'sockslisten')) + self.checkBoxOnionOnly.setChecked( + config.safeGetBoolean('bitmessagesettings', 'onionservicesonly')) proxy_type = config.safeGet( 'bitmessagesettings', 'socksproxytype', 'none') @@ -110,6 +112,7 @@ class SettingsDialog(QtGui.QDialog): self.lineEditSocksPassword.setEnabled(False) self.checkBoxAuthentication.setEnabled(False) self.checkBoxSocksListen.setEnabled(False) + self.checkBoxOnionOnly.setEnabled(False) elif proxy_type == 'SOCKS4a': self.comboBoxProxyType.setCurrentIndex(1) elif proxy_type == 'SOCKS5': @@ -200,11 +203,13 @@ class SettingsDialog(QtGui.QDialog): self.lineEditSocksPassword.setEnabled(False) self.checkBoxAuthentication.setEnabled(False) self.checkBoxSocksListen.setEnabled(False) + self.checkBoxOnionOnly.setEnabled(False) elif comboBoxIndex in (1, 2): self.lineEditSocksHostname.setEnabled(True) self.lineEditSocksPort.setEnabled(True) self.checkBoxAuthentication.setEnabled(True) self.checkBoxSocksListen.setEnabled(True) + self.checkBoxOnionOnly.setEnabled(True) if self.checkBoxAuthentication.isChecked(): self.lineEditSocksUsername.setEnabled(True) self.lineEditSocksPassword.setEnabled(True) @@ -334,6 +339,11 @@ class SettingsDialog(QtGui.QDialog): self.lineEditSocksPassword.text())) self.config.set('bitmessagesettings', 'sockslisten', 
str( self.checkBoxSocksListen.isChecked())) + if self.checkBoxOnionOnly.isChecked() \ + and not self.config.safeGetBoolean('bitmessagesettings', 'onionservicesonly'): + self.net_restart_needed = True + self.config.set('bitmessagesettings', 'onionservicesonly', str( + self.checkBoxOnionOnly.isChecked())) try: # Rounding to integers just for aesthetics self.config.set('bitmessagesettings', 'maxdownloadrate', str( diff --git a/src/bitmessageqt/settings.ui b/src/bitmessageqt/settings.ui index 307c06c2..963f2e64 100644 --- a/src/bitmessageqt/settings.ui +++ b/src/bitmessageqt/settings.ui @@ -403,6 +403,13 @@ + + + + Only connect to onion services (*.onion) + + + diff --git a/src/network/connectionchooser.py b/src/network/connectionchooser.py index ead8b31b..838ca45d 100644 --- a/src/network/connectionchooser.py +++ b/src/network/connectionchooser.py @@ -26,6 +26,8 @@ def getDiscoveredPeer(): def chooseConnection(stream): haveOnion = BMConfigParser().safeGet( "bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS' + onionOnly = BMConfigParser().safeGetBoolean( + "bitmessagesettings", "onionservicesonly") if state.trustedPeer: return state.trustedPeer try: @@ -49,6 +51,9 @@ def chooseConnection(stream): logger.warning('Error in %s', peer) rating = 0 if haveOnion: + # do not connect to raw IP addresses--keep all traffic within Tor overlay + if onionOnly and not peer.host.endswith('.onion'): + continue # onion addresses have a higher priority when SOCKS if peer.host.endswith('.onion') and rating > 0: rating = 1 From f871cd450c15270209e33d3c350fa76b2936d147 Mon Sep 17 00:00:00 2001 From: George McCandless <5fk7echy8@riseup.net> Date: Thu, 24 Oct 2019 19:35:32 +0000 Subject: [PATCH 21/70] Add test for 'onionservicesonly' mode. Credit to Dmitri Bogomolov in commit 557a8cc6d2bec881b8a3c531d3f725460ed515f5. --- src/tests/core.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/tests/core.py b/src/tests/core.py index b01c6b69..8d24a768 100644 --- a/src/tests/core.py +++ b/src/tests/core.py @@ -173,6 +173,22 @@ class TestCore(unittest.TestCase): self.fail( 'Failed to connect during %s sec' % (time.time() - _started)) + def test_onionservicesonly(self): + """test onionservicesonly networking mode""" + BMConfigParser().set('bitmessagesettings', 'onionservicesonly', True) + self._initiate_bootstrap() + BMConfigParser().remove_option('bitmessagesettings', 'dontconnect') + for _ in range(360): + time.sleep(1) + for n, peer in enumerate(BMConnectionPool().outboundConnections): + if n > 2: + return + if not peer.host.endswith('.onion'): + self.fail( + 'Found non onion hostname %s in outbound connections!' + % peer.host) + self.fail('Failed to connect to at least 3 nodes within 360 sec') + def test_bootstrap(self): """test bootstrapping""" self._initiate_bootstrap() From 1a7ef791e548fb3329ee1e6320081c071cde777b Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Mon, 21 Oct 2019 11:12:29 +0300 Subject: [PATCH 22/70] message_data_reader is obsolete --- src/message_data_reader.py | 136 ------------------------------------- 1 file changed, 136 deletions(-) delete mode 100644 src/message_data_reader.py diff --git a/src/message_data_reader.py b/src/message_data_reader.py deleted file mode 100644 index de6ed6c0..00000000 --- a/src/message_data_reader.py +++ /dev/null @@ -1,136 +0,0 @@ -# pylint: disable=too-many-locals -""" -This program can be used to print out everything in your Inbox or Sent folders and also take things out of the trash. 
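Back in PATCH 20 above, the onionservicesonly gate added to chooseConnection reduces to a small predicate; a condensed restatement (not the literal code) under assumed names:

    def peer_is_acceptable(peer, have_onion_proxy, onion_only):
        """peer.host is a hostname string, e.g. 'quzwelsuziwqgpt2.onion'"""
        if have_onion_proxy and onion_only \
                and not peer.host.endswith('.onion'):
            return False  # keep all traffic inside the Tor overlay
        return True
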
-Scroll down to the bottom to see the functions that you can uncomment. Save then run this file. -The functions which only read the database file seem to function just -fine even if you have Bitmessage running but you should definitly close -it before running the functions that make changes (like taking items out -of the trash). -""" - -from __future__ import absolute_import - -import sqlite3 -from binascii import hexlify -from time import strftime, localtime - -import paths -import queues - - -appdata = paths.lookupAppdataFolder() - -conn = sqlite3.connect(appdata + 'messages.dat') -conn.text_factory = str -cur = conn.cursor() - - -def readInbox(): - """Print each row from inbox table""" - print 'Printing everything in inbox table:' - item = '''select * from inbox''' - parameters = '' - cur.execute(item, parameters) - output = cur.fetchall() - for row in output: - print row - - -def readSent(): - """Print each row from sent table""" - print 'Printing everything in Sent table:' - item = '''select * from sent where folder !='trash' ''' - parameters = '' - cur.execute(item, parameters) - output = cur.fetchall() - for row in output: - (msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, - sleeptill, status, retrynumber, folder, encodingtype, ttl) = row # pylint: disable=unused-variable - print(hexlify(msgid), toaddress, 'toripe:', hexlify(toripe), 'fromaddress:', fromaddress, 'ENCODING TYPE:', - encodingtype, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', hexlify(ackdata), - lastactiontime, status, retrynumber, folder) - - -def readSubscriptions(): - """Print each row from subscriptions table""" - print 'Printing everything in subscriptions table:' - item = '''select * from subscriptions''' - parameters = '' - cur.execute(item, parameters) - output = cur.fetchall() - for row in output: - print row - - -def readPubkeys(): - """Print each row from pubkeys table""" - print 'Printing everything in pubkeys table:' - item = '''select address, transmitdata, time, usedpersonally from pubkeys''' - parameters = '' - cur.execute(item, parameters) - output = cur.fetchall() - for row in output: - address, transmitdata, time, usedpersonally = row - print( - 'Address:', address, '\tTime first broadcast:', unicode( - strftime('%a, %d %b %Y %I:%M %p', localtime(time)), 'utf-8'), - '\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', hexlify(transmitdata), - ) - - -def readInventory(): - """Print each row from inventory table""" - print 'Printing everything in inventory table:' - item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory''' - parameters = '' - cur.execute(item, parameters) - output = cur.fetchall() - for row in output: - obj_hash, objecttype, streamnumber, payload, expirestime = row - print 'Hash:', hexlify(obj_hash), objecttype, streamnumber, '\t', hexlify(payload), '\t', unicode( - strftime('%a, %d %b %Y %I:%M %p', localtime(expirestime)), 'utf-8') - - -def takeInboxMessagesOutOfTrash(): - """Update all inbox messages with folder=trash to have folder=inbox""" - item = '''update inbox set folder='inbox' where folder='trash' ''' - parameters = '' - cur.execute(item, parameters) - _ = cur.fetchall() - conn.commit() - print 'done' - - -def takeSentMessagesOutOfTrash(): - """Update all sent messages with folder=trash to have folder=sent""" - item = '''update sent set folder='sent' where folder='trash' ''' - parameters = '' - cur.execute(item, parameters) - _ = cur.fetchall() - conn.commit() - print 'done' 
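Most of what the deleted module did can be reproduced with a few lines of plain sqlite3; a hypothetical replacement sketch (as the removed docstring warns, close Bitmessage before making changes):

    import os
    import sqlite3

    import paths  # PyBitmessage's own appdata lookup

    conn = sqlite3.connect(
        os.path.join(paths.lookupAppdataFolder(), 'messages.dat'))
    conn.text_factory = str

    def takeInboxMessagesOutOfTrash(conn):
        """Same effect as the deleted helper of the same name"""
        with conn:  # the connection context manager commits on success
            conn.execute(
                "update inbox set folder='inbox' where folder='trash'")
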
- - -def markAllInboxMessagesAsUnread(): - """Update all messages in inbox to have read=0""" - item = '''update inbox set read='0' ''' - parameters = '' - cur.execute(item, parameters) - _ = cur.fetchall() - conn.commit() - queues.UISignalQueue.put(('changedInboxUnread', None)) - print 'done' - - -def vacuum(): - """Perform a vacuum on the database""" - item = '''VACUUM''' - parameters = '' - cur.execute(item, parameters) - _ = cur.fetchall() - conn.commit() - print 'done' - - -if __name__ == '__main__': - readInbox() From ee5be281793582a8c62e690e0248d910b544ab5c Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:51:59 +0530 Subject: [PATCH 23/70] helper_threading quality fixes --- src/helper_threading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/helper_threading.py b/src/helper_threading.py index e4fbe940..56dd7063 100644 --- a/src/helper_threading.py +++ b/src/helper_threading.py @@ -16,6 +16,6 @@ else: def _thread_name_hack(self): set_thread_name(self.name) threading.Thread.__bootstrap_original__(self) - + # pylint: disable=protected-access threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap threading.Thread._Thread__bootstrap = _thread_name_hack From afce50008571996e686c23861fe30b5071dfba12 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:52:41 +0530 Subject: [PATCH 24/70] knownnodes quality fixes --- src/knownnodes.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/knownnodes.py b/src/knownnodes.py index ba21bac7..1d9e6897 100644 --- a/src/knownnodes.py +++ b/src/knownnodes.py @@ -82,6 +82,7 @@ def pickle_deserialize_old_knownnodes(source): def saveKnownNodes(dirName=None): + """Save knownnodes to filesystem""" if dirName is None: dirName = state.appdata with knownNodesLock: @@ -90,6 +91,7 @@ def saveKnownNodes(dirName=None): def addKnownNode(stream, peer, lastseen=None, is_self=False): + """Add a new node to the dict""" knownNodes[stream][peer] = { "lastseen": lastseen or time.time(), "rating": 1 if is_self else 0, @@ -98,6 +100,7 @@ def addKnownNode(stream, peer, lastseen=None, is_self=False): def createDefaultKnownNodes(): + """Creating default Knownnodes""" past = time.time() - 2418600 # 28 days - 10 min for peer in DEFAULT_NODES: addKnownNode(1, peer, past) @@ -105,6 +108,7 @@ def createDefaultKnownNodes(): def readKnownNodes(): + """Load knownnodes from filesystem""" try: with open(state.appdata + 'knownnodes.dat', 'rb') as source: with knownNodesLock: @@ -131,6 +135,7 @@ def readKnownNodes(): def increaseRating(peer): + """Increase rating of a peer node""" increaseAmount = 0.1 maxRating = 1 with knownNodesLock: @@ -145,6 +150,7 @@ def increaseRating(peer): def decreaseRating(peer): + """Decrease rating of a peer node""" decreaseAmount = 0.1 minRating = -1 with knownNodesLock: @@ -159,6 +165,7 @@ def decreaseRating(peer): def trimKnownNodes(recAddrStream=1): + """Triming Knownnodes""" if len(knownNodes[recAddrStream]) < \ BMConfigParser().safeGetInt("knownnodes", "maxnodes"): return From 1181db66e00ba433c91c85ddf7f7916cc44f45fc Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:52:56 +0530 Subject: [PATCH 25/70] l10n quality fixes --- src/l10n.py | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/src/l10n.py b/src/l10n.py index d18f4199..bdfb03b3 100644 --- a/src/l10n.py +++ b/src/l10n.py @@ -1,4 +1,6 @@ - +""" +Localization +""" import logging import os import time @@ -49,7 +51,7 @@ except: if 
BMConfigParser().has_option('bitmessagesettings', 'timeformat'): time_format = BMConfigParser().get('bitmessagesettings', 'timeformat') - #Test the format string + # Test the format string try: time.strftime(time_format) except: @@ -58,48 +60,52 @@ if BMConfigParser().has_option('bitmessagesettings', 'timeformat'): else: time_format = DEFAULT_TIME_FORMAT -#It seems some systems lie about the encoding they use so we perform -#comprehensive decoding tests +# It seems some systems lie about the encoding they use so we perform +# comprehensive decoding tests if time_format != DEFAULT_TIME_FORMAT: try: - #Check day names + # Check day names for i in xrange(7): unicode(time.strftime(time_format, (0, 0, 0, 0, 0, 0, i, 0, 0)), encoding) - #Check month names + # Check month names for i in xrange(1, 13): unicode(time.strftime(time_format, (0, i, 0, 0, 0, 0, 0, 0, 0)), encoding) - #Check AM/PM + # Check AM/PM unicode(time.strftime(time_format, (0, 0, 0, 11, 0, 0, 0, 0, 0)), encoding) unicode(time.strftime(time_format, (0, 0, 0, 13, 0, 0, 0, 0, 0)), encoding) - #Check DST + # Check DST unicode(time.strftime(time_format, (0, 0, 0, 0, 0, 0, 0, 0, 1)), encoding) except: logger.exception('Could not decode locale formatted timestamp') time_format = DEFAULT_TIME_FORMAT encoding = DEFAULT_ENCODING + def setlocale(category, newlocale): + """Set the locale""" locale.setlocale(category, newlocale) # it looks like some stuff isn't initialised yet when this is called the # first time and its init gets the locale settings from the environment os.environ["LC_ALL"] = newlocale -def formatTimestamp(timestamp = None, as_unicode = True): - #For some reason some timestamps are strings so we need to sanitize. + +def formatTimestamp(timestamp=None, as_unicode=True): + """Return a formatted timestamp""" + # For some reason some timestamps are strings so we need to sanitize. if timestamp is not None and not isinstance(timestamp, int): try: timestamp = int(timestamp) except: timestamp = None - #timestamp can't be less than 0. + # timestamp can't be less than 0. if timestamp is not None and timestamp < 0: timestamp = None if timestamp is None: timestring = time.strftime(time_format) else: - #In case timestamp is too far in the future + # In case timestamp is too far in the future try: timestring = time.strftime(time_format, time.localtime(timestamp)) except ValueError: @@ -109,17 +115,21 @@ def formatTimestamp(timestamp = None, as_unicode = True): return unicode(timestring, encoding) return timestring + def getTranslationLanguage(): - userlocale = None - if BMConfigParser().has_option('bitmessagesettings', 'userlocale'): - userlocale = BMConfigParser().get('bitmessagesettings', 'userlocale') + """Return the user's language choice""" + userlocale = BMConfigParser().safeGet( + 'bitmessagesettings', 'userlocale', 'system') + return userlocale if userlocale and userlocale != 'system' else language - if userlocale in [None, '', 'system']: - return language - return userlocale - def getWindowsLocale(posixLocale): + """ + Get the Windows locale + Technically this converts the locale string from UNIX to Windows format, + because they use different ones in their + libraries. E.g. "en_EN.UTF-8" to "english". + """ if posixLocale in windowsLanguageMap: return windowsLanguageMap[posixLocale] if "." 
in posixLocale: From cacac00e21b12d61c55be0dd3cec267bba292222 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:53:23 +0530 Subject: [PATCH 26/70] openclpow quality fixes --- src/openclpow.py | 53 +++++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/src/openclpow.py b/src/openclpow.py index eb91a07f..fad25fa3 100644 --- a/src/openclpow.py +++ b/src/openclpow.py @@ -1,8 +1,9 @@ #!/usr/bin/env python2.7 +""" +Module for Proof of Work using OpenCL +""" from struct import pack, unpack -import time import hashlib -import random import os from bmconfigparser import BMConfigParser @@ -27,6 +28,8 @@ except ImportError: def initCL(): + """Initlialise OpenCL engine""" + # pylint: disable=global-statement global ctx, queue, program, hash_dt, libAvailable if libAvailable is False: return @@ -40,12 +43,13 @@ def initCL(): for platform in cl.get_platforms(): gpus.extend(platform.get_devices(device_type=cl.device_type.GPU)) if BMConfigParser().safeGet("bitmessagesettings", "opencl") == platform.vendor: - enabledGpus.extend(platform.get_devices(device_type=cl.device_type.GPU)) + enabledGpus.extend(platform.get_devices( + device_type=cl.device_type.GPU)) if platform.vendor not in vendors: vendors.append(platform.vendor) except: pass - if (len(enabledGpus) > 0): + if enabledGpus: ctx = cl.Context(devices=enabledGpus) queue = cl.CommandQueue(ctx) f = open(os.path.join(paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r') @@ -55,23 +59,29 @@ def initCL(): else: logger.info("No OpenCL GPUs found") del enabledGpus[:] - except Exception as e: + except Exception: logger.error("OpenCL fail: ", exc_info=True) del enabledGpus[:] + def openclAvailable(): - return (len(gpus) > 0) + """Are there any OpenCL GPUs available?""" + return bool(gpus) + def openclEnabled(): - return (len(enabledGpus) > 0) + """Is OpenCL enabled (and available)?""" + return bool(enabledGpus) -def do_opencl_pow(hash, target): + +def do_opencl_pow(hash_, target): + """Perform PoW using OpenCL""" output = numpy.zeros(1, dtype=[('v', numpy.uint64, 1)]) - if (len(enabledGpus) == 0): + if not enabledGpus: return output[0][0] data = numpy.zeros(1, dtype=hash_dt, order='C') - data[0]['v'] = ("0000000000000000" + hash).decode("hex") + data[0]['v'] = ("0000000000000000" + hash_).decode("hex") data[0]['target'] = target hash_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=data) @@ -83,9 +93,8 @@ def do_opencl_pow(hash, target): kernel.set_arg(0, hash_buf) kernel.set_arg(1, dest_buf) - start = time.time() progress = 0 - globamt = worksize*2000 + globamt = worksize * 2000 while output[0][0] == 0 and shutdown == 0: kernel.set_arg(2, pack("Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8]) - print "{} - value {} < {}".format(nonce, trialValue, target) - + initCL() + target_ = 54227212183 + initialHash = ("3758f55b5a8d902fd3597e4ce6a2d3f23daff735f65d9698c270987f4e67ad590" + "b93f3ffeba0ef2fd08a8dc2f87b68ae5a0dc819ab57f22ad2c4c9c8618a43b3").decode("hex") + nonce = do_opencl_pow(initialHash.encode("hex"), target_) + trialValue, = unpack( + '>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) + print "{} - value {} < {}".format(nonce, trialValue, target_) From 27be035e518792f4d38972c55d36ceea8f011c2b Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:53:38 +0530 Subject: [PATCH 27/70] paths quality fixes --- src/paths.py | 9 +++++++-- 1 file changed, 7 
insertions(+), 2 deletions(-) diff --git a/src/paths.py b/src/paths.py index ada54c8e..59bc5e42 100644 --- a/src/paths.py +++ b/src/paths.py @@ -1,3 +1,6 @@ +""" +Path related functions +""" import logging import os import re @@ -41,7 +44,8 @@ def lookupAppdataFolder(): dataFolder = os.path.join( os.environ['HOME'], 'Library/Application Support/', APPNAME - ) + '/' # FIXME: should also be os.path.sep + ) + '/' + except KeyError: sys.exit( 'Could not find home folder, please report this message' @@ -75,6 +79,7 @@ def codePath(): return os.path.dirname(__file__) return ( os.environ.get('RESOURCEPATH') + # pylint: disable=protected-access if frozen == "macosx_app" else sys._MEIPASS) @@ -91,7 +96,7 @@ def tail(f, lines=20): # from the end of the file blocks = [] while lines_to_go > 0 and block_end_byte > 0: - if (block_end_byte - BLOCK_SIZE > 0): + if block_end_byte - BLOCK_SIZE > 0: # read the last block we haven't yet read f.seek(block_number * BLOCK_SIZE, 2) blocks.append(f.read(BLOCK_SIZE)) From 6f91ba1b33d1ebd469535213b9e680f405054216 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:53:53 +0530 Subject: [PATCH 28/70] shared quality fixes --- src/shared.py | 106 +++++++++++++++++++++++++++----------------------- 1 file changed, 58 insertions(+), 48 deletions(-) diff --git a/src/shared.py b/src/shared.py index 6d03bcca..1a2add28 100644 --- a/src/shared.py +++ b/src/shared.py @@ -1,21 +1,28 @@ -from __future__ import division +""" +Some shared functions + +.. deprecated:: 0.6.3 + Should be moved to different places and this file removed, + but it needs refactoring. +""" +from __future__ import division # Libraries. +import hashlib import os import sys import stat import threading -import hashlib import subprocess from binascii import hexlify from pyelliptic import arithmetic # Project imports. -import state import highlevelcrypto +import state +from addresses import decodeAddress, encodeVarint from bmconfigparser import BMConfigParser from debug import logger -from addresses import decodeAddress, encodeVarint from helper_sql import sqlQuery @@ -56,6 +63,7 @@ maximumLengthOfTimeToBotherResendingMessages = 0 def isAddressInMyAddressBook(address): + """Is address in my addressbook?""" queryreturn = sqlQuery( '''select address from addressbook where address=?''', address) @@ -64,6 +72,7 @@ def isAddressInMyAddressBook(address): # At this point we should really just have a isAddressInMy(book, address)... 
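# A sketch of that suggested isAddressInMy(book, address) helper (hypothetical,
# not part of this patch; `book` is restricted to a fixed whitelist of table
# names, so only the address travels as a bound SQL parameter):
def isAddressInMy(book, address):
    """Is the address in the given table ('addressbook' or 'subscriptions')?"""
    assert book in ('addressbook', 'subscriptions')
    return sqlQuery(
        'select address from %s where address=?' % book, str(address)) != []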
def isAddressInMySubscriptionsList(address): + """Am I subscribed to this address?""" queryreturn = sqlQuery( '''select * from subscriptions where address=?''', str(address)) @@ -71,6 +80,7 @@ def isAddressInMySubscriptionsList(address): def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address): + """Am I subscribed to this address, is it in my addressbook or whitelist?""" if isAddressInMyAddressBook(address): return True @@ -90,7 +100,8 @@ def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address): return False -def decodeWalletImportFormat(WIFstring): +def decodeWalletImportFormat(WIFstring): # pylint: disable=inconsistent-return-statements + """Convert private key from base58 that's used in the config file to 8-bit binary string""" fullString = arithmetic.changebase(WIFstring, 58, 256) privkey = fullString[:-4] if fullString[-4:] != \ @@ -101,7 +112,7 @@ def decodeWalletImportFormat(WIFstring): ' 6 characters of the PRIVATE key: %s', str(WIFstring)[:6] ) - os._exit(0) + os._exit(0) # pylint: disable=protected-access # return "" elif privkey[0] == '\x80': # checksum passed return privkey[1:] @@ -111,10 +122,11 @@ def decodeWalletImportFormat(WIFstring): ' the checksum passed but the key doesn\'t begin with hex 80.' ' Here is the PRIVATE key: %s', WIFstring ) - os._exit(0) + os._exit(0) # pylint: disable=protected-access def reloadMyAddressHashes(): + """Reinitialise runtime data (e.g. encryption objects, address hashes) from the config file""" logger.debug('reloading keys from keys.dat file') myECCryptorObjects.clear() myAddressesByHash.clear() @@ -128,26 +140,21 @@ def reloadMyAddressHashes(): if isEnabled: hasEnabledKeys = True # status - _, addressVersionNumber, streamNumber, hash = \ - decodeAddress(addressInKeysFile) + addressVersionNumber, streamNumber, hashobj = decodeAddress(addressInKeysFile)[1:] if addressVersionNumber in (2, 3, 4): # Returns a simple 32 bytes of information encoded # in 64 Hex characters, or null if there was an error. 
privEncryptionKey = hexlify(decodeWalletImportFormat( - BMConfigParser().get(addressInKeysFile, 'privencryptionkey')) - ) - + BMConfigParser().get(addressInKeysFile, 'privencryptionkey'))) # It is 32 bytes encoded as 64 hex characters if len(privEncryptionKey) == 64: - myECCryptorObjects[hash] = \ + myECCryptorObjects[hashobj] = \ highlevelcrypto.makeCryptor(privEncryptionKey) - myAddressesByHash[hash] = addressInKeysFile + myAddressesByHash[hashobj] = addressInKeysFile tag = hashlib.sha512(hashlib.sha512( encodeVarint(addressVersionNumber) + - encodeVarint(streamNumber) + hash).digest() - ).digest()[32:] + encodeVarint(streamNumber) + hashobj).digest()).digest()[32:] myAddressesByTag[tag] = addressInKeysFile - else: logger.error( 'Error in reloadMyAddressHashes: Can\'t handle' @@ -159,6 +166,7 @@ def reloadMyAddressHashes(): def reloadBroadcastSendersForWhichImWatching(): + """Reinitialise runtime data for the broadcasts I'm subscribed to from the config file""" broadcastSendersForWhichImWatching.clear() MyECSubscriptionCryptorObjects.clear() queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1') @@ -166,9 +174,9 @@ def reloadBroadcastSendersForWhichImWatching(): for row in queryreturn: address, = row # status - _, addressVersionNumber, streamNumber, hash = decodeAddress(address) + addressVersionNumber, streamNumber, hashobj = decodeAddress(address)[1:] if addressVersionNumber == 2: - broadcastSendersForWhichImWatching[hash] = 0 + broadcastSendersForWhichImWatching[hashobj] = 0 # Now, for all addresses, even version 2 addresses, # we should create Cryptor objects in a dictionary which we will # use to attempt to decrypt encrypted broadcast messages. @@ -176,14 +184,14 @@ def reloadBroadcastSendersForWhichImWatching(): if addressVersionNumber <= 3: privEncryptionKey = hashlib.sha512( encodeVarint(addressVersionNumber) + - encodeVarint(streamNumber) + hash + encodeVarint(streamNumber) + hashobj ).digest()[:32] - MyECSubscriptionCryptorObjects[hash] = \ + MyECSubscriptionCryptorObjects[hashobj] = \ highlevelcrypto.makeCryptor(hexlify(privEncryptionKey)) else: doubleHashOfAddressData = hashlib.sha512(hashlib.sha512( encodeVarint(addressVersionNumber) + - encodeVarint(streamNumber) + hash + encodeVarint(streamNumber) + hashobj ).digest()).digest() tag = doubleHashOfAddressData[32:] privEncryptionKey = doubleHashOfAddressData[:32] @@ -192,21 +200,22 @@ def reloadBroadcastSendersForWhichImWatching(): def fixPotentiallyInvalidUTF8Data(text): + """Sanitise invalid UTF-8 strings""" try: unicode(text, 'utf-8') return text except: return 'Part of the message is corrupt. The message cannot be' \ - ' displayed the normal way.\n\n' + repr(text) + ' displayed the normal way.\n\n' + repr(text) -# Checks sensitive file permissions for inappropriate umask -# during keys.dat creation. (Or unwise subsequent chmod.) -# -# Returns true iff file appears to have appropriate permissions. def checkSensitiveFilePermissions(filename): + """ + :param str filename: path to the file + :return: True if file appears to have appropriate permissions. + """ if sys.platform == 'win32': - # TODO: This might deserve extra checks by someone familiar with + # .. todo:: This might deserve extra checks by someone familiar with # Windows systems. 
return True elif sys.platform[:7] == 'freebsd': @@ -214,30 +223,30 @@ def checkSensitiveFilePermissions(filename): present_permissions = os.stat(filename)[0] disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO return present_permissions & disallowed_permissions == 0 - else: - try: - # Skip known problems for non-Win32 filesystems - # without POSIX permissions. - fstype = subprocess.check_output( - 'stat -f -c "%%T" %s' % (filename), - shell=True, - stderr=subprocess.STDOUT - ) - if 'fuseblk' in fstype: - logger.info( - 'Skipping file permissions check for %s.' - ' Filesystem fuseblk detected.', filename) - return True - except: - # Swallow exception here, but we might run into trouble later! - logger.error('Could not determine filesystem type. %s', filename) - present_permissions = os.stat(filename)[0] - disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO - return present_permissions & disallowed_permissions == 0 + try: + # Skip known problems for non-Win32 filesystems + # without POSIX permissions. + fstype = subprocess.check_output( + 'stat -f -c "%%T" %s' % (filename), + shell=True, + stderr=subprocess.STDOUT + ) + if 'fuseblk' in fstype: + logger.info( + 'Skipping file permissions check for %s.' + ' Filesystem fuseblk detected.', filename) + return True + except: + # Swallow exception here, but we might run into trouble later! + logger.error('Could not determine filesystem type. %s', filename) + present_permissions = os.stat(filename)[0] + disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO + return present_permissions & disallowed_permissions == 0 # Fixes permissions on a sensitive file. def fixSensitiveFilePermissions(filename, hasEnabledKeys): + """Try to change file permissions to be more restrictive""" if hasEnabledKeys: logger.warning( 'Keyfile had insecure permissions, and there were enabled' @@ -262,6 +271,7 @@ def fixSensitiveFilePermissions(filename, hasEnabledKeys): def openKeysFile(): + """Open keys file with an external editor""" if 'linux' in sys.platform: subprocess.call(["xdg-open", state.appdata + 'keys.dat']) else: From 503d0b33d074cf09d928d945cfff5c89ac9dd7d4 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:54:04 +0530 Subject: [PATCH 29/70] shutdown quality fixes --- src/shutdown.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/shutdown.py b/src/shutdown.py index 85d11d67..1d40a90f 100644 --- a/src/shutdown.py +++ b/src/shutdown.py @@ -1,3 +1,4 @@ +"""shutdown function""" import os import Queue import threading @@ -15,8 +16,7 @@ from queues import ( def doCleanShutdown(): - # Used to tell proof of work worker threads - # and the objectProcessorThread to exit. + """Used to tell proof of work worker threads and the objectProcessorThread to exit.""" state.shutdown = 1 objectProcessorQueue.put(('checkShutdownVariable', 'no data')) @@ -53,7 +53,7 @@ def doCleanShutdown(): for thread in threading.enumerate(): if (thread is not threading.currentThread() and - isinstance(thread, StoppableThread) and + isinstance(thread, StoppableThread) and thread.name != 'SQL'): logger.debug("Waiting for thread %s", thread.name) thread.join() @@ -76,10 +76,10 @@ def doCleanShutdown(): except Queue.Empty: break - if shared.thisapp.daemon or not state.enableGUI: # FIXME redundant? + if shared.thisapp.daemon or not state.enableGUI: # ..fixme:: redundant? 
logger.info('Clean shutdown complete.') shared.thisapp.cleanup() - os._exit(0) + os._exit(0) # pylint: disable=protected-access else: logger.info('Core shutdown complete.') for thread in threading.enumerate(): From b9ad6a3bac4b0ec05822e3ff6b4c9790e23570fc Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:54:20 +0530 Subject: [PATCH 30/70] singleinstance quality fixes --- src/singleinstance.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/singleinstance.py b/src/singleinstance.py index 03bda504..d0a0871c 100644 --- a/src/singleinstance.py +++ b/src/singleinstance.py @@ -16,7 +16,7 @@ except ImportError: pass -class singleinstance: +class singleinstance(object): """ Implements a single instance application by creating a lock file at appdata. @@ -40,6 +40,7 @@ class singleinstance: atexit.register(self.cleanup) def lock(self): + """Obtain single instance lock""" if self.lockPid is None: self.lockPid = os.getpid() if sys.platform == 'win32': @@ -52,8 +53,7 @@ class singleinstance: self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC ) - except OSError: - type, e, tb = sys.exc_info() + except OSError as e: if e.errno == 13: print( 'Another instance of this application' @@ -84,6 +84,7 @@ class singleinstance: self.fp.flush() def cleanup(self): + """Release single instance lock""" if not self.initialized: return if self.daemon and self.lockPid == os.getpid(): @@ -94,7 +95,7 @@ class singleinstance: os.close(self.fd) else: fcntl.lockf(self.fp, fcntl.LOCK_UN) - except Exception, e: + except Exception: pass return From fda5d23c2d93e23f5ef14c63ab539a37cba74702 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:54:37 +0530 Subject: [PATCH 31/70] state quality fixes --- src/state.py | 56 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/src/state.py b/src/state.py index 3e051edf..a3b930ab 100644 --- a/src/state.py +++ b/src/state.py @@ -1,30 +1,42 @@ +""" +Global runtime variables. +""" import collections neededPubkeys = {} streamsInWhichIAmParticipating = [] -# For UPnP extPort = None +"""For UPnP""" -# for Tor hidden service socksIP = None +"""for Tor hidden service""" -appdata = '' # holds the location of the application data storage directory +appdata = '' +"""holds the location of the application data storage directory""" -# Set to 1 by the doCleanShutdown function. -# Used to tell the proof of work worker threads to exit. shutdown = 0 +""" + Set to 1 by the doCleanShutdown function. + Used to tell the proof of work worker threads to exit. 
+""" # Component control flags - set on startup, do not change during runtime # The defaults are for standalone GUI (default operating mode) -enableNetwork = True # enable network threads -enableObjProc = True # enable object processing threads -enableAPI = True # enable API (if configured) -enableGUI = True # enable GUI (QT or ncurses) -enableSTDIO = False # enable STDIO threads +enableNetwork = True +"""enable network threads""" +enableObjProc = True +"""enable object processing threads""" +enableAPI = True +"""enable API (if configured)""" +enableGUI = True +"""enable GUI (QT or ncurses)""" +enableSTDIO = False +"""enable STDIO threads""" curses = False -sqlReady = False # set to true by sqlTread when ready for processing +sqlReady = False +"""set to true by sqlTread when ready for processing""" maximumNumberOfHalfOpenConnections = 0 @@ -35,17 +47,19 @@ uploadThread = None ownAddresses = {} -# If the trustedpeer option is specified in keys.dat then this will -# contain a Peer which will be connected to instead of using the -# addresses advertised by other peers. The client will only connect to -# this peer and the timing attack mitigation will be disabled in order -# to download data faster. The expected use case is where the user has -# a fast connection to a trusted server where they run a BitMessage -# daemon permanently. If they then run a second instance of the client -# on a local machine periodically when they want to check for messages -# it will sync with the network a lot faster without compromising -# security. trustedPeer = None +""" + If the trustedpeer option is specified in keys.dat then this will + contain a Peer which will be connected to instead of using the + addresses advertised by other peers. The client will only connect to + this peer and the timing attack mitigation will be disabled in order + to download data faster. The expected use case is where the user has + a fast connection to a trusted server where they run a BitMessage + daemon permanently. If they then run a second instance of the client + on a local machine periodically when they want to check for messages + it will sync with the network a lot faster without compromising + security. +""" discoveredPeers = {} From 58e5fac6d724b1bd65dbebe76ad093379fb39b34 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 22 Oct 2019 19:54:52 +0530 Subject: [PATCH 32/70] tr quality fixes --- src/tr.py | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/src/tr.py b/src/tr.py index 8b41167f..87e67219 100644 --- a/src/tr.py +++ b/src/tr.py @@ -1,22 +1,36 @@ +""" +Translating text +""" import os import state -# This is used so that the translateText function can be used when we are in daemon mode and not using any QT functions. + class translateClass: + """ + This is used so that the translateText function can be used + when we are in daemon mode and not using any QT functions. + """ + # pylint: disable=old-style-class,too-few-public-methods def __init__(self, context, text): self.context = context self.text = text - def arg(self,argument): - if '%' in self.text: - return translateClass(self.context, self.text.replace('%','',1)) # This doesn't actually do anything with the arguments because we don't have a UI in which to display this information anyway. 
- else: - return self.text -def _translate(context, text, disambiguation = None, encoding = None, n = None): + def arg(self, _): + """Replace argument placeholders""" + if '%' in self.text: + # This doesn't actually do anything with the arguments + # because we don't have a UI in which to display this information anyway. + return translateClass(self.context, self.text.replace('%', '', 1)) + return self.text + + +def _translate(context, text, disambiguation=None, encoding=None, n=None): # pylint: disable=unused-argument return translateText(context, text, n) -def translateText(context, text, n = None): + +def translateText(context, text, n=None): + """Translate text in context""" try: enableGUI = state.enableGUI except AttributeError: # inside the plugin @@ -25,15 +39,17 @@ def translateText(context, text, n = None): try: from PyQt4 import QtCore, QtGui except Exception as err: - print 'PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon' + print 'PyBitmessage requires PyQt unless you want to run it as a daemon'\ + ' and interact with it using the API.'\ + ' You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download'\ + ' or by searching Google for \'PyQt Download\'.'\ + ' If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon' print 'Error message:', err - os._exit(0) + os._exit(0) # pylint: disable=protected-access if n is None: return QtGui.QApplication.translate(context, text) - else: - return QtGui.QApplication.translate(context, text, None, QtCore.QCoreApplication.CodecForTr, n) + return QtGui.QApplication.translate(context, text, None, QtCore.QCoreApplication.CodecForTr, n) else: if '%' in text: - return translateClass(context, text.replace('%','',1)) - else: - return text + return translateClass(context, text.replace('%', '', 1)) + return text From 061a9ef973c09ceffd5b98a9f3945f54af8de263 Mon Sep 17 00:00:00 2001 From: bug Lady Date: Sun, 10 Nov 2019 04:07:50 +0100 Subject: [PATCH 33/70] fix typos and flesh out placeholder --- docs/contribute.dir/processes.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/contribute.dir/processes.rst b/docs/contribute.dir/processes.rst index 8f0385d4..eb913325 100644 --- a/docs/contribute.dir/processes.rst +++ b/docs/contribute.dir/processes.rst @@ -1,8 +1,8 @@ Processes ========= -In other to keep the Bitmessage project running the team run a number of systems and accounts that form the -development pipeline and continuous delivery process. We are always striving to improve the process. Towards +In order to keep the Bitmessage project running, the team runs a number of systems and accounts that form the +development pipeline and continuous delivery process. We are always striving to improve this process. Towards that end it is documented here. @@ -20,7 +20,7 @@ Our official Github_ account is Bitmessage. Our issue tracker is here as well. BitMessage ---------- -We eat our own dog food! You can send us bug reports via the Bitmessage chan at xxx +We eat our own dog food! You can send us bug reports via the [chan] bitmessage BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY .. 
_website: https://bitmessage.org From 341651973a1ebbffed9e6ae2c133c25675848428 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sun, 27 Oct 2019 15:15:45 +0200 Subject: [PATCH 34/70] Reduced imports: - exported from network package all objects used outside; - made all threads available in threads module. Wrote some module docstrings. --- src/bitmessagemain.py | 34 ++++++++--------------- src/bitmessageqt/networkstatus.py | 2 +- src/class_addressGenerator.py | 6 +++- src/class_objectProcessor.py | 9 +++--- src/class_singleCleaner.py | 3 +- src/class_singleWorker.py | 2 +- src/class_sqlThread.py | 6 ++++ src/helper_sql.py | 30 +++++++++++++++++--- src/helper_threading.py | 21 -------------- src/network/__init__.py | 17 ++++++++++++ src/network/receivequeuethread.py | 6 ++-- src/shutdown.py | 2 +- src/threads.py | 46 +++++++++++++++++++++++++++++++ src/upnp.py | 3 +- 14 files changed, 123 insertions(+), 64 deletions(-) delete mode 100644 src/helper_threading.py create mode 100644 src/threads.py diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py index 4ad9311f..81702783 100755 --- a/src/bitmessagemain.py +++ b/src/bitmessagemain.py @@ -41,30 +41,18 @@ import shared import knownnodes import state import shutdown -from debug import logger # this should go before any threads - -# Classes -from class_sqlThread import sqlThread -from class_singleCleaner import singleCleaner -from class_objectProcessor import objectProcessor -from class_singleWorker import singleWorker -from class_addressGenerator import addressGenerator from bmconfigparser import BMConfigParser - +from debug import logger # this should go before any threads from inventory import Inventory - -from network.connectionpool import BMConnectionPool -from network.dandelion import Dandelion -from network.networkthread import BMNetworkThread -from network.receivequeuethread import ReceiveQueueThread -from network.announcethread import AnnounceThread -from network.invthread import InvThread -from network.addrthread import AddrThread -from network.downloadthread import DownloadThread -from network.uploadthread import UploadThread - -# Helper Functions -import helper_threading +# Network objects and threads +from network import ( + BMConnectionPool, Dandelion, + AddrThread, AnnounceThread, BMNetworkThread, InvThread, ReceiveQueueThread, + DownloadThread, UploadThread) +# Synchronous threads +from threads import ( + set_thread_name, + addressGenerator, objectProcessor, singleCleaner, singleWorker, sqlThread) def connectToStream(streamNumber): @@ -275,7 +263,7 @@ class Main: self.setSignalHandler() - helper_threading.set_thread_name("PyBitmessage") + set_thread_name("PyBitmessage") state.dandelion = config.safeGetInt('network', 'dandelion') # dandelion requires outbound connections, without them, diff --git a/src/bitmessageqt/networkstatus.py b/src/bitmessageqt/networkstatus.py index 5f014563..6fbf5df6 100644 --- a/src/bitmessageqt/networkstatus.py +++ b/src/bitmessageqt/networkstatus.py @@ -14,7 +14,7 @@ import network.stats import shared import widgets from inventory import Inventory -from network.connectionpool import BMConnectionPool +from network import BMConnectionPool from retranslateui import RetranslateMixin from tr import _translate from uisignaler import UISignaler diff --git a/src/class_addressGenerator.py b/src/class_addressGenerator.py index fa268377..c7c7e261 100644 --- a/src/class_addressGenerator.py +++ b/src/class_addressGenerator.py @@ -14,7 +14,7 @@ import highlevelcrypto from bmconfigparser 
import BMConfigParser from addresses import decodeAddress, encodeAddress, encodeVarint from fallback import RIPEMD160Hash -from network.threads import StoppableThread +from network import StoppableThread class addressGenerator(StoppableThread): @@ -29,6 +29,10 @@ class addressGenerator(StoppableThread): super(addressGenerator, self).stopThread() def run(self): + """ + Process the requests for addresses generation + from `.queues.addressGeneratorQueue` + """ while state.shutdown == 0: queueValue = queues.addressGeneratorQueue.get() nonceTrialsPerByte = 0 diff --git a/src/class_objectProcessor.py b/src/class_objectProcessor.py index 6ae46658..e2b95447 100644 --- a/src/class_objectProcessor.py +++ b/src/class_objectProcessor.py @@ -57,6 +57,7 @@ class objectProcessor(threading.Thread): self.successfullyDecryptMessageTimings = [] def run(self): + """Process the objects from `.queues.objectProcessorQueue`""" while True: objectType, data = queues.objectProcessorQueue.get() @@ -1051,7 +1052,8 @@ class objectProcessor(threading.Thread): # for it. elif addressVersion >= 4: tag = hashlib.sha512(hashlib.sha512( - encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe + encodeVarint(addressVersion) + encodeVarint(streamNumber) + + ripe ).digest()).digest()[32:] if tag in state.neededPubkeys: del state.neededPubkeys[tag] @@ -1059,9 +1061,8 @@ class objectProcessor(threading.Thread): def sendMessages(self, address): """ - This function is called by the possibleNewPubkey function when - that function sees that we now have the necessary pubkey - to send one or more messages. + This method is called by the `possibleNewPubkey` when it sees + that we now have the necessary pubkey to send one or more messages. """ logger.info('We have been awaiting the arrival of this pubkey.') sqlExecute( diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index fc53a5b0..4717c3cb 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -31,8 +31,7 @@ import tr from bmconfigparser import BMConfigParser from helper_sql import sqlQuery, sqlExecute from inventory import Inventory -from network.connectionpool import BMConnectionPool -from network.threads import StoppableThread +from network import BMConnectionPool, StoppableThread class singleCleaner(StoppableThread): diff --git a/src/class_singleWorker.py b/src/class_singleWorker.py index 77fa18c0..60eabe2e 100644 --- a/src/class_singleWorker.py +++ b/src/class_singleWorker.py @@ -28,7 +28,7 @@ from addresses import calculateInventoryHash, decodeAddress, decodeVarint, encod from bmconfigparser import BMConfigParser from helper_sql import sqlExecute, sqlQuery from inventory import Inventory -from network.threads import StoppableThread +from network import StoppableThread def sizeof_fmt(num, suffix='h/s'): diff --git a/src/class_sqlThread.py b/src/class_sqlThread.py index a45571e0..bcb56303 100644 --- a/src/class_sqlThread.py +++ b/src/class_sqlThread.py @@ -1,3 +1,7 @@ +""" +sqlThread is defined here +""" + import threading from bmconfigparser import BMConfigParser import sqlite3 @@ -19,11 +23,13 @@ import tr class sqlThread(threading.Thread): + """A thread for all SQL operations""" def __init__(self): threading.Thread.__init__(self, name="SQL") def run(self): + """Process SQL queries from `.helper_sql.sqlSubmitQueue`""" self.conn = sqlite3.connect(state.appdata + 'messages.dat') self.conn.text_factory = str self.cur = self.conn.cursor() diff --git a/src/helper_sql.py b/src/helper_sql.py index 2b558f62..138a9f50 100644 --- 
a/src/helper_sql.py +++ b/src/helper_sql.py @@ -1,17 +1,39 @@ -"""Helper Sql performs sql operations.""" +""" +SQL-related functions defined here really just pass the queries (or other SQL +commands) to :class:`.threads.sqlThread` through the `sqlSubmitQueue` queue and +check for or return the result received from `sqlReturnQueue`. + +This is done this way because :mod:`sqlite3` is so thread-unsafe that they +won't even let you call it from different threads using your own locks. +SQLite objects can only be used from one thread. + +.. note:: This actually only applies for certain deployments, and/or + really old versions of sqlite. I haven't actually seen it anywhere. + Current versions do have support for threading and multiprocessing. + I don't see an urgent reason to refactor this, but it should be noted + in the comment that the problem is mostly not valid. Sadly, last time + I checked, there is no reliable way to check whether the library is + or isn't thread-safe. +""" import threading import Queue sqlSubmitQueue = Queue.Queue() -# SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks. -# SQL objects #can only be called from one thread. +"""the queue for SQL""" sqlReturnQueue = Queue.Queue() +"""the queue for results""" sqlLock = threading.Lock() def sqlQuery(sqlStatement, *args): - """SQLLITE execute statement and return query.""" + """ + Query sqlite and return results + + :param str sqlStatement: SQL statement string + :param list args: SQL query parameters + :rtype: list + """ sqlLock.acquire() sqlSubmitQueue.put(sqlStatement) diff --git a/src/helper_threading.py b/src/helper_threading.py deleted file mode 100644 index 56dd7063..00000000 --- a/src/helper_threading.py +++ /dev/null @@ -1,21 +0,0 @@ -"""set_thread_name for threads that don't use StoppableThread""" - -import threading - -try: - import prctl -except ImportError: - def set_thread_name(name): - """Set the thread name for external use (visible from the OS).""" - threading.current_thread().name = name -else: - def set_thread_name(name): - """Set a name for the thread for python internal use.""" - prctl.set_name(name) - - def _thread_name_hack(self): - set_thread_name(self.name) - threading.Thread.__bootstrap_original__(self) - # pylint: disable=protected-access - threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap - threading.Thread._Thread__bootstrap = _thread_name_hack diff --git a/src/network/__init__.py b/src/network/__init__.py index e69de29b..51c4c4da 100644 --- a/src/network/__init__.py +++ b/src/network/__init__.py @@ -0,0 +1,17 @@ +from addrthread import AddrThread +from announcethread import AnnounceThread +from connectionpool import BMConnectionPool +from dandelion import Dandelion +from downloadthread import DownloadThread +from invthread import InvThread +from networkthread import BMNetworkThread +from receivequeuethread import ReceiveQueueThread +from threads import StoppableThread +from uploadthread import UploadThread + + +__all__ = [ + "BMConnectionPool", "Dandelion", + "AddrThread", "AnnounceThread", "BMNetworkThread", "DownloadThread", + "InvThread", "ReceiveQueueThread", "UploadThread", "StoppableThread" +] diff --git a/src/network/receivequeuethread.py b/src/network/receivequeuethread.py index 13c12ce2..cd904065 100644 --- a/src/network/receivequeuethread.py +++ b/src/network/receivequeuethread.py @@ -32,14 +32,12 @@ class ReceiveQueueThread(StoppableThread): try: connection = BMConnectionPool().getConnectionByAddr(dest) - # KeyError =
connection object not found - except KeyError: + except KeyError: # connection object not found receiveDataQueue.task_done() continue try: connection.process() - # UnknownStateError = state isn't implemented - except UnknownStateError: + except UnknownStateError: # state isn't implemented pass except socket.error as err: if err.errno == errno.EBADF: diff --git a/src/shutdown.py b/src/shutdown.py index 1d40a90f..c81a519a 100644 --- a/src/shutdown.py +++ b/src/shutdown.py @@ -10,7 +10,7 @@ from debug import logger from helper_sql import sqlQuery, sqlStoredProcedure from inventory import Inventory from knownnodes import saveKnownNodes -from network.threads import StoppableThread +from network import StoppableThread from queues import ( addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue) diff --git a/src/threads.py b/src/threads.py new file mode 100644 index 00000000..08d61196 --- /dev/null +++ b/src/threads.py @@ -0,0 +1,46 @@ +""" +PyBitmessage does various tasks in separate threads. Most of them inherit +from `.network.StoppableThread`. There are `addressGenerator` for +addresses generation, `objectProcessor` for processing the network objects +passed minimal validation, `singleCleaner` to periodically clean various +internal storages (like inventory and knownnodes) and do forced garbage +collection, `singleWorker` for doing PoW, `sqlThread` for querying sqlite +database. + +There are also other threads in the `.network` package. + +:func:`set_thread_name` is defined here for the threads that don't inherit from +:class:`.network.StoppableThread` +""" + +import threading + +try: + import prctl +except ImportError: + def set_thread_name(name): + """Set a name for the thread for python internal use.""" + threading.current_thread().name = name +else: + def set_thread_name(name): + """Set the thread name for external use (visible from the OS).""" + prctl.set_name(name) + + def _thread_name_hack(self): + set_thread_name(self.name) + threading.Thread.__bootstrap_original__(self) + # pylint: disable=protected-access + threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap + threading.Thread._Thread__bootstrap = _thread_name_hack + +from class_addressGenerator import addressGenerator +from class_objectProcessor import objectProcessor +from class_singleCleaner import singleCleaner +from class_singleWorker import singleWorker +from class_sqlThread import sqlThread + + +__all__ = [ + "addressGenerator", "objectProcessor", "singleCleaner", "singleWorker", + "sqlThread" +] diff --git a/src/upnp.py b/src/upnp.py index b1ee2e7b..979b4186 100644 --- a/src/upnp.py +++ b/src/upnp.py @@ -21,8 +21,7 @@ import state import tr from bmconfigparser import BMConfigParser from debug import logger -from network.connectionpool import BMConnectionPool -from network.threads import StoppableThread +from network import BMConnectionPool, StoppableThread def createRequestXML(service, action, arguments=None): From 4d8d9b169f7326b189928f5b485aa7d88207245c Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Thu, 31 Oct 2019 14:13:36 +0200 Subject: [PATCH 35/70] Moved ObjectProcessorQueue to queues, added some doc --- docs/conf.py | 2 +- src/class_objectProcessorQueue.py | 24 ----------------- src/queues.py | 45 ++++++++++++++++++++++++++----- 3 files changed, 39 insertions(+), 32 deletions(-) delete mode 100644 src/class_objectProcessorQueue.py diff --git a/docs/conf.py b/docs/conf.py index b6e75cc1..f9283f38 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -229,7 
+229,7 @@ apidoc_excluded_paths = [ 'bitmessageqt/newaddresswizard.py', 'class_objectProcessor.py', 'defaults.py', 'helper_startup.py', 'kivymd', 'main.py', 'navigationdrawer', 'network/http*', - 'pybitmessage', 'queues.py', 'tests', 'version.py' + 'pybitmessage', 'tests', 'version.py' ] apidoc_module_first = True apidoc_separate_modules = True diff --git a/src/class_objectProcessorQueue.py b/src/class_objectProcessorQueue.py deleted file mode 100644 index b6628816..00000000 --- a/src/class_objectProcessorQueue.py +++ /dev/null @@ -1,24 +0,0 @@ -import Queue -import threading -import time - -class ObjectProcessorQueue(Queue.Queue): - maxSize = 32000000 - - def __init__(self): - Queue.Queue.__init__(self) - self.sizeLock = threading.Lock() - self.curSize = 0 # in Bytes. We maintain this to prevent nodes from flooing us with objects which take up too much memory. If this gets too big we'll sleep before asking for further objects. - - def put(self, item, block = True, timeout = None): - while self.curSize >= self.maxSize: - time.sleep(1) - with self.sizeLock: - self.curSize += len(item[1]) - Queue.Queue.put(self, item, block, timeout) - - def get(self, block = True, timeout = None): - item = Queue.Queue.get(self, block, timeout) - with self.sizeLock: - self.curSize -= len(item[1]) - return item diff --git a/src/queues.py b/src/queues.py index 7b6bbade..d0ac77d0 100644 --- a/src/queues.py +++ b/src/queues.py @@ -1,20 +1,51 @@ -import Queue +"""Most of the queues used by bitmessage threads are defined here.""" + +import Queue +import threading +import time -from class_objectProcessorQueue import ObjectProcessorQueue from multiqueue import MultiQueue + +class ObjectProcessorQueue(Queue.Queue): + """Special queue class using lock for `.threads.objectProcessor`""" + + maxSize = 32000000 + + def __init__(self): + Queue.Queue.__init__(self) + self.sizeLock = threading.Lock() + #: in Bytes. We maintain this to prevent nodes from flooding us + #: with objects which take up too much memory. If this gets + #: too big we'll sleep before asking for further objects. + self.curSize = 0 + + def put(self, item, block=True, timeout=None): + while self.curSize >= self.maxSize: + time.sleep(1) + with self.sizeLock: + self.curSize += len(item[1]) + Queue.Queue.put(self, item, block, timeout) + + def get(self, block=True, timeout=None): + item = Queue.Queue.get(self, block, timeout) + with self.sizeLock: + self.curSize -= len(item[1]) + return item + + workerQueue = Queue.Queue() UISignalQueue = Queue.Queue() addressGeneratorQueue = Queue.Queue() -# receiveDataThreads dump objects they hear on the network into this -# queue to be processed. +#: receiveDataThreads dump objects they hear on the network into this +#: queue to be processed. objectProcessorQueue = ObjectProcessorQueue() invQueue = MultiQueue() addrQueue = MultiQueue() portCheckerQueue = Queue.Queue() receiveDataQueue = Queue.Queue() -# The address generator thread uses this queue to get information back -# to the API thread. +#: The address generator thread uses this queue to get information back +#: to the API thread. 
apiAddressGeneratorReturnQueue = Queue.Queue() -# Exceptions +#: for exceptions excQueue = Queue.Queue() From 7a1f803c92667d9a21f41a8d3341b0f46477ffa9 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sun, 3 Nov 2019 14:09:00 +0200 Subject: [PATCH 36/70] network.BMConnectionPool: added shortcuts connections() and establishedConnections(), some formatting fixes --- src/class_singleCleaner.py | 18 ++++---- src/network/bmproto.py | 5 +- src/network/connectionpool.py | 87 ++++++++++++++++++----------------- src/network/downloadthread.py | 20 ++++---- src/network/invthread.py | 19 +++----- src/network/objectracker.py | 3 +- src/network/stats.py | 17 +------ src/network/uploadthread.py | 12 ++--- 8 files changed, 77 insertions(+), 104 deletions(-) diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index 4717c3cb..9ffc1607 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -1,5 +1,5 @@ """ -The singleCleaner class is a timer-driven thread that cleans data structures +The `singleCleaner` class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy. @@ -45,12 +45,12 @@ class singleCleaner(StoppableThread): try: shared.maximumLengthOfTimeToBotherResendingMessages = ( float(BMConfigParser().get( - 'bitmessagesettings', 'stopresendingafterxdays')) * - 24 * 60 * 60 + 'bitmessagesettings', 'stopresendingafterxdays')) + * 24 * 60 * 60 ) + ( float(BMConfigParser().get( - 'bitmessagesettings', 'stopresendingafterxmonths')) * - (60 * 60 * 24 * 365) / 12) + 'bitmessagesettings', 'stopresendingafterxmonths')) + * (60 * 60 * 24 * 365) / 12) except: # Either the user hasn't set stopresendingafterxdays and # stopresendingafterxmonths yet or the options are missing @@ -92,8 +92,8 @@ class singleCleaner(StoppableThread): "SELECT toaddress, ackdata, status FROM sent" " WHERE ((status='awaitingpubkey' OR status='msgsent')" " AND folder='sent' AND sleeptill?)", - int(time.time()), int(time.time()) - - shared.maximumLengthOfTimeToBotherResendingMessages + int(time.time()), int(time.time()) + - shared.maximumLengthOfTimeToBotherResendingMessages ) for row in queryreturn: if len(row) < 2: @@ -139,9 +139,7 @@ class singleCleaner(StoppableThread): # thread.downloadQueue.clear() # inv/object tracking - for connection in \ - BMConnectionPool().inboundConnections.values() + \ - BMConnectionPool().outboundConnections.values(): + for connection in BMConnectionPool().connections(): connection.clean() # discovery tracking diff --git a/src/network/bmproto.py b/src/network/bmproto.py index 6375f393..86295b87 100644 --- a/src/network/bmproto.py +++ b/src/network/bmproto.py @@ -645,10 +645,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker): @staticmethod def stopDownloadingObject(hashId, forwardAnyway=False): """Stop downloading an object""" - for connection in ( - connectionpool.BMConnectionPool().inboundConnections.values() + - connectionpool.BMConnectionPool().outboundConnections.values() - ): + for connection in connectionpool.BMConnectionPool().connections(): try: del connection.objectsNewToMe[hashId] except KeyError: diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py index 1267522a..8f959356 100644 --- a/src/network/connectionpool.py +++ b/src/network/connectionpool.py @@ -1,6 +1,5 @@ """ -src/network/connectionpool.py -================================== +`BMConnectionPool` class definition """ 
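# Usage sketch for the new shortcuts (illustrative only, not part of this
# patch): connections() merges the inboundConnections and outboundConnections
# dicts that callers previously combined by hand, and establishedConnections()
# narrows that list to peers with fullyEstablished == True.
def _exampleIterateConnections():
    """Log every fully established peer (example only)"""
    pool = BMConnectionPool()
    assert len(pool.establishedConnections()) <= len(pool.connections())
    for connection in pool.establishedConnections():
        logger.debug(
            'established: %s:%i',
            connection.destination.host, connection.destination.port)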
import errno import logging @@ -26,9 +25,10 @@ logger = logging.getLogger('default') @Singleton -# pylint: disable=too-many-instance-attributes class BMConnectionPool(object): """Pool of all existing connections""" + # pylint: disable=too-many-instance-attributes + def __init__(self): asyncore.set_rates( BMConfigParser().safeGetInt( @@ -41,9 +41,21 @@ class BMConnectionPool(object): self.listeningSockets = {} self.udpSockets = {} self.streams = [] - self.lastSpawned = 0 - self.spawnWait = 2 - self.bootstrapped = False + self._lastSpawned = 0 + self._spawnWait = 2 + self._bootstrapped = False + + def connections(self): + """ + Shortcut for combined list of connections from + `inboundConnections` and `outboundConnections` dicts + """ + return self.inboundConnections.values() + self.outboundConnections.values() + + def establishedConnections(self): + """Shortcut for list of connections having fullyEstablished == True""" + return [ + x for x in self.connections() if x.fullyEstablished] def connectToStream(self, streamNumber): """Connect to a bitmessage stream""" @@ -74,10 +86,7 @@ class BMConnectionPool(object): def isAlreadyConnected(self, nodeid): """Check if we're already connected to this peer""" - for i in ( - self.inboundConnections.values() + - self.outboundConnections.values() - ): + for i in self.connections(): try: if nodeid == i.nodeid: return True @@ -129,10 +138,11 @@ class BMConnectionPool(object): "bitmessagesettings", "onionbindip") else: host = '127.0.0.1' - if (BMConfigParser().safeGetBoolean( - "bitmessagesettings", "sockslisten") or - BMConfigParser().safeGet( - "bitmessagesettings", "socksproxytype") == "none"): + if ( + BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten") + or BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") + == "none" + ): # python doesn't like bind + INADDR_ANY? 
# host = socket.INADDR_ANY host = BMConfigParser().get("network", "bind") @@ -205,11 +215,13 @@ class BMConnectionPool(object): 'bitmessagesettings', 'socksproxytype', '') onionsocksproxytype = BMConfigParser().safeGet( 'bitmessagesettings', 'onionsocksproxytype', '') - if (socksproxytype[:5] == 'SOCKS' and - not BMConfigParser().safeGetBoolean( - 'bitmessagesettings', 'sockslisten') and - '.onion' not in BMConfigParser().safeGet( - 'bitmessagesettings', 'onionhostname', '')): + if ( + socksproxytype[:5] == 'SOCKS' + and not BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'sockslisten') + and '.onion' not in BMConfigParser().safeGet( + 'bitmessagesettings', 'onionhostname', '') + ): acceptConnections = False # pylint: disable=too-many-nested-blocks @@ -217,8 +229,8 @@ class BMConnectionPool(object): if not knownnodes.knownNodesActual: self.startBootstrappers() knownnodes.knownNodesActual = True - if not self.bootstrapped: - self.bootstrapped = True + if not self._bootstrapped: + self._bootstrapped = True Proxy.proxy = ( BMConfigParser().safeGet( 'bitmessagesettings', 'sockshostname'), @@ -260,8 +272,7 @@ class BMConnectionPool(object): continue try: - if (chosen.host.endswith(".onion") and - Proxy.onion_proxy is not None): + if chosen.host.endswith(".onion") and Proxy.onion_proxy: if onionsocksproxytype == "SOCKS5": self.addConnection(Socks5BMConnection(chosen)) elif onionsocksproxytype == "SOCKS4a": @@ -276,12 +287,9 @@ class BMConnectionPool(object): if e.errno == errno.ENETUNREACH: continue - self.lastSpawned = time.time() + self._lastSpawned = time.time() else: - for i in ( - self.inboundConnections.values() + - self.outboundConnections.values() - ): + for i in self.connections(): # FIXME: rating will be increased after next connection i.handle_close() @@ -291,8 +299,8 @@ class BMConnectionPool(object): self.startListening() else: for bind in re.sub( - '[^\w.]+', ' ', # pylint: disable=anomalous-backslash-in-string - BMConfigParser().safeGet('network', 'bind') + r'[^\w.]+', ' ', + BMConfigParser().safeGet('network', 'bind') ).split(): self.startListening(bind) logger.info('Listening for incoming connections.') @@ -301,8 +309,8 @@ class BMConnectionPool(object): self.startUDPSocket() else: for bind in re.sub( - '[^\w.]+', ' ', # pylint: disable=anomalous-backslash-in-string - BMConfigParser().safeGet('network', 'bind') + r'[^\w.]+', ' ', + BMConfigParser().safeGet('network', 'bind') ).split(): self.startUDPSocket(bind) self.startUDPSocket(False) @@ -319,16 +327,13 @@ class BMConnectionPool(object): i.accepting = i.connecting = i.connected = False logger.info('Stopped udp sockets.') - loopTime = float(self.spawnWait) - if self.lastSpawned < time.time() - self.spawnWait: + loopTime = float(self._spawnWait) + if self._lastSpawned < time.time() - self._spawnWait: loopTime = 2.0 asyncore.loop(timeout=loopTime, count=1000) reaper = [] - for i in ( - self.inboundConnections.values() + - self.outboundConnections.values() - ): + for i in self.connections(): minTx = time.time() - 20 if i.fullyEstablished: minTx -= 300 - 20 @@ -340,10 +345,8 @@ class BMConnectionPool(object): time.time() - i.lastTx) i.set_state("close") for i in ( - self.inboundConnections.values() + - self.outboundConnections.values() + - self.listeningSockets.values() + - self.udpSockets.values() + self.connections() + + self.listeningSockets.values() + self.udpSockets.values() ): if not (i.accepting or i.connecting or i.connected): reaper.append(i) diff --git a/src/network/downloadthread.py 
b/src/network/downloadthread.py index 472b32c0..e882f6de 100644 --- a/src/network/downloadthread.py +++ b/src/network/downloadthread.py @@ -1,6 +1,5 @@ """ -src/network/downloadthread.py -============================= +`DownloadThread` class definition """ import time @@ -29,7 +28,7 @@ class DownloadThread(StoppableThread): def cleanPending(self): """Expire pending downloads eventually""" - deadline = time.time() - DownloadThread.requestExpires + deadline = time.time() - self.requestExpires try: toDelete = [k for k, v in missingObjects.iteritems() if v < deadline] except RuntimeError: @@ -43,15 +42,12 @@ class DownloadThread(StoppableThread): while not self._stopped: requested = 0 # Choose downloading peers randomly - connections = [ - x for x in - BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values() - if x.fullyEstablished] + connections = BMConnectionPool().establishedConnections() helper_random.randomshuffle(connections) - try: - requestChunk = max(int(min(DownloadThread.maxRequestChunk, len(missingObjects)) / len(connections)), 1) - except ZeroDivisionError: - requestChunk = 1 + requestChunk = max(int( + min(self.maxRequestChunk, len(missingObjects)) + / len(connections)), 1) if connections else 1 + for i in connections: now = time.time() # avoid unnecessary delay @@ -81,7 +77,7 @@ class DownloadThread(StoppableThread): '%s:%i Requesting %i objects', i.destination.host, i.destination.port, chunkCount) requested += chunkCount - if time.time() >= self.lastCleaned + DownloadThread.cleanInterval: + if time.time() >= self.lastCleaned + self.cleanInterval: self.cleanPending() if not requested: self.stop.wait(1) diff --git a/src/network/invthread.py b/src/network/invthread.py index bffa6ecb..d5690486 100644 --- a/src/network/invthread.py +++ b/src/network/invthread.py @@ -20,9 +20,7 @@ def handleExpiredDandelion(expired): the object""" if not expired: return - for i in \ - BMConnectionPool().inboundConnections.values() + \ - BMConnectionPool().outboundConnections.values(): + for i in BMConnectionPool().connections(): if not i.fullyEstablished: continue for x in expired: @@ -44,9 +42,7 @@ class InvThread(StoppableThread): def handleLocallyGenerated(stream, hashId): """Locally generated inventory items require special handling""" Dandelion().addHash(hashId, stream=stream) - for connection in \ - BMConnectionPool().inboundConnections.values() + \ - BMConnectionPool().outboundConnections.values(): + for connection in BMConnectionPool().connections(): if state.dandelion and connection != Dandelion().objectChildStem(hashId): continue connection.objectsNewToThem[hashId] = time() @@ -67,8 +63,7 @@ class InvThread(StoppableThread): break if chunk: - for connection in BMConnectionPool().inboundConnections.values() + \ - BMConnectionPool().outboundConnections.values(): + for connection in BMConnectionPool().connections(): fluffs = [] stems = [] for inv in chunk: @@ -96,13 +91,13 @@ class InvThread(StoppableThread): if fluffs: random.shuffle(fluffs) connection.append_write_buf(protocol.CreatePacket( - 'inv', addresses.encodeVarint(len(fluffs)) + - "".join(fluffs))) + 'inv', + addresses.encodeVarint(len(fluffs)) + ''.join(fluffs))) if stems: random.shuffle(stems) connection.append_write_buf(protocol.CreatePacket( - 'dinv', addresses.encodeVarint(len(stems)) + - "".join(stems))) + 'dinv', + addresses.encodeVarint(len(stems)) + ''.join(stems))) invQueue.iterate() for i in range(len(chunk)): diff --git a/src/network/objectracker.py b/src/network/objectracker.py 
index a8e3292a..b97aee46 100644 --- a/src/network/objectracker.py +++ b/src/network/objectracker.py @@ -95,8 +95,7 @@ class ObjectTracker(object): def handleReceivedObject(self, streamNumber, hashid): """Handling received object""" - for i in network.connectionpool.BMConnectionPool().inboundConnections.values( - ) + network.connectionpool.BMConnectionPool().outboundConnections.values(): + for i in network.connectionpool.BMConnectionPool().connections(): if not i.fullyEstablished: continue try: diff --git a/src/network/stats.py b/src/network/stats.py index fedfbbc1..d760ace2 100644 --- a/src/network/stats.py +++ b/src/network/stats.py @@ -19,16 +19,7 @@ currentSentSpeed = 0 def connectedHostsList(): """List of all the connected hosts""" - retval = [] - for i in BMConnectionPool().inboundConnections.values() + \ - BMConnectionPool().outboundConnections.values(): - if not i.fullyEstablished: - continue - try: - retval.append(i) - except AttributeError: - pass - return retval + return BMConnectionPool().establishedConnections() def sentBytes(): @@ -71,12 +62,6 @@ def downloadSpeed(): def pendingDownload(): """Getting pending downloads""" return len(missingObjects) - # tmp = {} - # for connection in BMConnectionPool().inboundConnections.values() + \ - # BMConnectionPool().outboundConnections.values(): - # for k in connection.objectsNewToMe.keys(): - # tmp[k] = True - # return len(tmp) def pendingUpload(): diff --git a/src/network/uploadthread.py b/src/network/uploadthread.py index 1b57bd9a..7d80d789 100644 --- a/src/network/uploadthread.py +++ b/src/network/uploadthread.py @@ -1,5 +1,5 @@ """ -src/network/uploadthread.py +`UploadThread` class definition """ import time @@ -22,19 +22,19 @@ class UploadThread(StoppableThread): def run(self): while not self._stopped: uploaded = 0 - # Choose downloading peers randomly - connections = [x for x in BMConnectionPool().inboundConnections.values() + - BMConnectionPool().outboundConnections.values() if x.fullyEstablished] + # Choose uploading peers randomly + connections = BMConnectionPool().establishedConnections() helper_random.randomshuffle(connections) for i in connections: now = time.time() # avoid unnecessary delay if i.skipUntil >= now: continue - if len(i.write_buf) > UploadThread.maxBufSize: + if len(i.write_buf) > self.maxBufSize: continue try: - request = i.pendingUpload.randomKeys(RandomTrackingDict.maxPending) + request = i.pendingUpload.randomKeys( + RandomTrackingDict.maxPending) except KeyError: continue payload = bytearray() From 0967f03b40a920e6e33453c04c4c0ea583131cb3 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sun, 3 Nov 2019 14:10:21 +0200 Subject: [PATCH 37/70] addresses: raise varintEncodeError in encodeVarint() instead of SystemExit (looks like a bug) --- src/addresses.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/addresses.py b/src/addresses.py index 533ec169..b83f3f6e 100644 --- a/src/addresses.py +++ b/src/addresses.py @@ -54,11 +54,20 @@ def decodeBase58(string, alphabet=ALPHABET): return num +class varintEncodeError(Exception): + """Exception class for encoding varint""" + pass + + +class varintDecodeError(Exception): + """Exception class for decoding varint data""" + pass + + def encodeVarint(integer): """Convert integer into varint bytes""" if integer < 0: - logger.error('varint cannot be < 0') - raise SystemExit + raise varintEncodeError('varint cannot be < 0') if integer < 253: return pack('>B', integer) if integer >= 253 and integer < 65536: 
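# Worked example of the varint size classes encodeVarint() handles
# (illustrative, not part of this patch):
#   encodeVarint(252)        == pack('>B', 252)                     # 1 byte
#   encodeVarint(253)        == pack('>B', 253) + pack('>H', 253)   # 3 bytes
#   encodeVarint(65536)      == pack('>B', 254) + pack('>I', 65536)
#   encodeVarint(4294967296) == pack('>B', 255) + pack('>Q', 4294967296)
#   encodeVarint(-1)         now raises varintEncodeError instead of SystemExit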
@@ -68,13 +77,7 @@ def encodeVarint(integer): if integer >= 4294967296 and integer < 18446744073709551616: return pack('>B', 255) + pack('>Q', integer) if integer >= 18446744073709551616: - logger.error('varint cannot be >= 18446744073709551616') - raise SystemExit - - -class varintDecodeError(Exception): - """Exception class for decoding varint data""" - pass + raise varintEncodeError('varint cannot be >= 18446744073709551616') def decodeVarint(data): From 388de9649568a06aa709193338856006e1165683 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sun, 3 Nov 2019 14:13:18 +0200 Subject: [PATCH 38/70] Alphabetical internal import order in bitmessagemain --- src/bitmessagemain.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py index 81702783..c70eb0bf 100755 --- a/src/bitmessagemain.py +++ b/src/bitmessagemain.py @@ -7,8 +7,6 @@ # Right now, PyBitmessage only support connecting to stream 1. It doesn't # yet contain logic to expand into further streams. -# The software version variable is now held in shared.py - import os import sys @@ -31,24 +29,23 @@ import time import traceback from struct import pack -from helper_startup import ( - isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections -) -from singleinstance import singleinstance - import defaults import shared -import knownnodes import state import shutdown from bmconfigparser import BMConfigParser from debug import logger # this should go before any threads +from helper_startup import ( + isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections +) from inventory import Inventory +from knownnodes import readKnownNodes # Network objects and threads from network import ( BMConnectionPool, Dandelion, AddrThread, AnnounceThread, BMNetworkThread, InvThread, ReceiveQueueThread, DownloadThread, UploadThread) +from singleinstance import singleinstance # Synchronous threads from threads import ( set_thread_name, @@ -72,14 +69,6 @@ def connectToStream(streamNumber): except: pass - with knownnodes.knownNodesLock: - if streamNumber not in knownnodes.knownNodes: - knownnodes.knownNodes[streamNumber] = {} - if streamNumber * 2 not in knownnodes.knownNodes: - knownnodes.knownNodes[streamNumber * 2] = {} - if streamNumber * 2 + 1 not in knownnodes.knownNodes: - knownnodes.knownNodes[streamNumber * 2 + 1] = {} - BMConnectionPool().connectToStream(streamNumber) @@ -279,7 +268,7 @@ class Main: defaults.networkDefaultPayloadLengthExtraBytes = int( defaults.networkDefaultPayloadLengthExtraBytes / 100) - knownnodes.readKnownNodes() + readKnownNodes() # Not needed if objproc is disabled if state.enableObjProc: From d6c1845b711e763efa382432ec6d2a92438eb22f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Sun, 3 Nov 2019 17:11:52 +0200 Subject: [PATCH 39/70] Moved Peer from state to network.node and trustedPeer to network.connectionpool.BMConnectionPool attribute --- src/class_objectProcessor.py | 3 ++- src/helper_startup.py | 26 ++---------------- src/knownnodes.py | 46 +++++++++++++++++--------------- src/network/announcethread.py | 5 +++- src/network/bmproto.py | 24 +++++++++-------- src/network/connectionchooser.py | 2 -- src/network/connectionpool.py | 31 +++++++++++++++++++-- src/network/node.py | 4 +-- src/network/proxy.py | 11 ++++---- src/network/socks5.py | 4 +-- src/network/tcp.py | 9 ++++--- src/network/udp.py | 9 ++++--- src/state.py | 17 ------------ src/tests/core.py | 7 ++--- src/upnp.py | 8 +++--- 
 15 files changed, 102 insertions(+), 104 deletions(-)

diff --git a/src/class_objectProcessor.py b/src/class_objectProcessor.py
index e2b95447..b22876e8 100644
--- a/src/class_objectProcessor.py
+++ b/src/class_objectProcessor.py
@@ -21,6 +21,7 @@ import helper_sent
 from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery
 from helper_ackPayload import genAckPayload
 from network import bmproto
+from network.node import Peer
 import protocol
 import queues
 import state
@@ -161,7 +162,7 @@ class objectProcessor(threading.Thread):
         if not host:
             return
 
-        peer = state.Peer(host, port)
+        peer = Peer(host, port)
         with knownnodes.knownNodesLock:
             knownnodes.addKnownNode(
                 stream, peer, is_self=state.ownAddresses.get(peer))
diff --git a/src/helper_startup.py b/src/helper_startup.py
index 1a1119f5..9aaad5ef 100644
--- a/src/helper_startup.py
+++ b/src/helper_startup.py
@@ -1,13 +1,9 @@
 """
-src/helper_startup.py
-=====================
-
-Helper Start performs all the startup operations.
+Startup operations.
 """
 # pylint: disable=too-many-branches,too-many-statements
 from __future__ import print_function
 
-import ConfigParser
 import os
 import platform
 import sys
@@ -19,28 +15,12 @@ import paths
 import state
 from bmconfigparser import BMConfigParser
 
+
 # The user may de-select Portable Mode in the settings if they want
 # the config files to stay in the application data folder.
 StoreConfigFilesInSameDirectoryAsProgramByDefault = False
 
 
-def _loadTrustedPeer():
-    try:
-        trustedPeer = BMConfigParser().get('bitmessagesettings', 'trustedpeer')
-    except ConfigParser.Error:
-        # This probably means the trusted peer wasn't specified so we
-        # can just leave it as None
-        return
-    try:
-        host, port = trustedPeer.split(':')
-    except ValueError:
-        sys.exit(
-            'Bad trustedpeer config setting! It should be set as'
-            ' trustedpeer=<host>:<port>'
-        )
-    state.trustedPeer = state.Peer(host, int(port))
-
-
 def loadConfig():
     """Load the config"""
     config = BMConfigParser()
@@ -134,8 +114,6 @@ def loadConfig():
     else:
         updateConfig()
 
-    _loadTrustedPeer()
-
 
 def updateConfig():
     """Save the config"""
diff --git a/src/knownnodes.py b/src/knownnodes.py
index 1d9e6897..bb588fcb 100644
--- a/src/knownnodes.py
+++ b/src/knownnodes.py
@@ -3,6 +3,7 @@ Manipulations with knownNodes dictionary.
""" import json +import logging import os import pickle import threading @@ -10,28 +11,33 @@ import time import state from bmconfigparser import BMConfigParser -from debug import logger +from network.node import Peer knownNodesLock = threading.Lock() +"""Thread lock for knownnodes modification""" knownNodes = {stream: {} for stream in range(1, 4)} +"""The dict of known nodes for each stream""" knownNodesTrimAmount = 2000 +"""trim stream knownnodes dict to this length""" -# forget a node after rating is this low knownNodesForgetRating = -0.5 +"""forget a node after rating is this low""" knownNodesActual = False +logger = logging.getLogger('default') + DEFAULT_NODES = ( - state.Peer('5.45.99.75', 8444), - state.Peer('75.167.159.54', 8444), - state.Peer('95.165.168.168', 8444), - state.Peer('85.180.139.241', 8444), - state.Peer('158.222.217.190', 8080), - state.Peer('178.62.12.187', 8448), - state.Peer('24.188.198.204', 8111), - state.Peer('109.147.204.113', 1195), - state.Peer('178.11.46.221', 8444) + Peer('5.45.99.75', 8444), + Peer('75.167.159.54', 8444), + Peer('95.165.168.168', 8444), + Peer('85.180.139.241', 8444), + Peer('158.222.217.190', 8080), + Peer('178.62.12.187', 8448), + Peer('24.188.198.204', 8111), + Peer('109.147.204.113', 1195), + Peer('178.11.46.221', 8444) ) @@ -57,19 +63,17 @@ def json_deserialize_knownnodes(source): for node in json.load(source): peer = node['peer'] info = node['info'] - peer = state.Peer(str(peer['host']), peer.get('port', 8444)) + peer = Peer(str(peer['host']), peer.get('port', 8444)) knownNodes[node['stream']][peer] = info - if ( - not (knownNodesActual or info.get('self')) and - peer not in DEFAULT_NODES - ): + if not (knownNodesActual + or info.get('self')) and peer not in DEFAULT_NODES: knownNodesActual = True def pickle_deserialize_old_knownnodes(source): """ - Unpickle source and reorganize knownnodes dict if it's in old format + Unpickle source and reorganize knownnodes dict if it has old format the old format was {Peer:lastseen, ...} the new format is {Peer:{"lastseen":i, "rating":f}} """ @@ -129,7 +133,7 @@ def readKnownNodes(): if onionhostname and ".onion" in onionhostname: onionport = config.safeGetInt('bitmessagesettings', 'onionport') if onionport: - self_peer = state.Peer(onionhostname, onionport) + self_peer = Peer(onionhostname, onionport) addKnownNode(1, self_peer, is_self=True) state.ownAddresses[self_peer] = True @@ -182,7 +186,7 @@ def dns(): """Add DNS names to knownnodes""" for port in [8080, 8444]: addKnownNode( - 1, state.Peer('bootstrap%s.bitmessage.org' % port, port)) + 1, Peer('bootstrap%s.bitmessage.org' % port, port)) def cleanupKnownNodes(): @@ -208,8 +212,8 @@ def cleanupKnownNodes(): del knownNodes[stream][node] continue # scrap old nodes (age > 3 hours) with low rating - if (age > 10800 and knownNodes[stream][node]["rating"] <= - knownNodesForgetRating): + if (age > 10800 and knownNodes[stream][node]["rating"] + <= knownNodesForgetRating): needToWriteKnownNodesToDisk = True del knownNodes[stream][node] continue diff --git a/src/network/announcethread.py b/src/network/announcethread.py index 5cd27ede..f635fc90 100644 --- a/src/network/announcethread.py +++ b/src/network/announcethread.py @@ -10,6 +10,7 @@ from bmconfigparser import BMConfigParser from network.bmproto import BMProto from network.connectionpool import BMConnectionPool from network.udp import UDPSocket +from node import Peer from threads import StoppableThread @@ -36,6 +37,8 @@ class AnnounceThread(StoppableThread): for stream in 
state.streamsInWhichIAmParticipating: addr = ( stream, - state.Peer('127.0.0.1', BMConfigParser().safeGetInt("bitmessagesettings", "port")), + Peer( + '127.0.0.1', + BMConfigParser().safeGetInt('bitmessagesettings', 'port')), time.time()) connection.append_write_buf(BMProto.assembleAddr([addr])) diff --git a/src/network/bmproto.py b/src/network/bmproto.py index 86295b87..bf0b5742 100644 --- a/src/network/bmproto.py +++ b/src/network/bmproto.py @@ -24,8 +24,8 @@ from network.bmobject import ( BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError, BMObjectExpiredError, BMObjectUnwantedStreamError, BMObjectInvalidError, BMObjectAlreadyHaveError) -from network.node import Node from network.proxy import ProxyError +from node import Node, Peer from objectracker import missingObjects, ObjectTracker from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue from randomtrackingdict import RandomTrackingDict @@ -443,7 +443,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker): seenTime > time.time() - BMProto.addressAlive and port > 0 ): - peer = state.Peer(decodedIP, port) + peer = Peer(decodedIP, port) try: if knownnodes.knownNodes[stream][peer]["lastseen"] > seenTime: continue @@ -464,7 +464,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker): def bm_command_portcheck(self): """Incoming port check request, queue it.""" - portCheckerQueue.put(state.Peer(self.destination, self.peerNode.port)) + portCheckerQueue.put(Peer(self.destination, self.peerNode.port)) return True def bm_command_ping(self): @@ -594,12 +594,14 @@ class BMProto(AdvancedDispatcher, ObjectTracker): # incoming from a peer we're connected to as outbound, # or server full report the same error to counter deanonymisation if ( - state.Peer(self.destination.host, self.peerNode.port) in - connectionpool.BMConnectionPool().inboundConnections or - len(connectionpool.BMConnectionPool().inboundConnections) + - len(connectionpool.BMConnectionPool().outboundConnections) > - BMConfigParser().safeGetInt("bitmessagesettings", "maxtotalconnections") + - BMConfigParser().safeGetInt("bitmessagesettings", "maxbootstrapconnections") + Peer(self.destination.host, self.peerNode.port) + in connectionpool.BMConnectionPool().inboundConnections + or len(connectionpool.BMConnectionPool().inboundConnections) + + len(connectionpool.BMConnectionPool().outboundConnections) + > BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxtotalconnections') + + BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxbootstrapconnections') ): self.append_write_buf(protocol.assembleErrorMessage( errorText="Server full, please try again later.", fatal=2)) @@ -622,7 +624,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker): @staticmethod def assembleAddr(peerList): """Build up a packed address""" - if isinstance(peerList, state.Peer): + if isinstance(peerList, Peer): peerList = (peerList) if not peerList: return b'' @@ -686,7 +688,7 @@ class BMStringParser(BMProto): """ def __init__(self): super(BMStringParser, self).__init__() - self.destination = state.Peer('127.0.0.1', 8444) + self.destination = Peer('127.0.0.1', 8444) self.payload = None ObjectTracker.__init__(self) diff --git a/src/network/connectionchooser.py b/src/network/connectionchooser.py index 838ca45d..9d2f85d6 100644 --- a/src/network/connectionchooser.py +++ b/src/network/connectionchooser.py @@ -28,8 +28,6 @@ def chooseConnection(stream): "bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS' onionOnly = BMConfigParser().safeGetBoolean( "bitmessagesettings", 
"onionservicesonly") - if state.trustedPeer: - return state.trustedPeer try: retval = portCheckerQueue.get(False) portCheckerQueue.task_done() diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py index 8f959356..654b74a1 100644 --- a/src/network/connectionpool.py +++ b/src/network/connectionpool.py @@ -5,6 +5,7 @@ import errno import logging import re import socket +import sys import time import asyncore_pollchoose as asyncore @@ -14,6 +15,7 @@ import protocol import state from bmconfigparser import BMConfigParser from connectionchooser import chooseConnection +from node import Peer from proxy import Proxy from singleton import Singleton from tcp import ( @@ -29,6 +31,19 @@ class BMConnectionPool(object): """Pool of all existing connections""" # pylint: disable=too-many-instance-attributes + trustedPeer = None + """ + If the trustedpeer option is specified in keys.dat then this will + contain a Peer which will be connected to instead of using the + addresses advertised by other peers. + + The expected use case is where the user has a trusted server where + they run a Bitmessage daemon permanently. If they then run a second + instance of the client on a local machine periodically when they want + to check for messages it will sync with the network a lot faster + without compromising security. + """ + def __init__(self): asyncore.set_rates( BMConfigParser().safeGetInt( @@ -45,6 +60,18 @@ class BMConnectionPool(object): self._spawnWait = 2 self._bootstrapped = False + trustedPeer = BMConfigParser().safeGet( + 'bitmessagesettings', 'trustedpeer') + try: + if trustedPeer: + host, port = trustedPeer.split(':') + self.trustedPeer = Peer(host, int(port)) + except ValueError: + sys.exit( + 'Bad trustedpeer config setting! It should be set as' + ' trustedpeer=:' + ) + def connections(self): """ Shortcut for combined list of connections from @@ -112,7 +139,7 @@ class BMConnectionPool(object): if isinstance(connection, UDPSocket): del self.udpSockets[connection.listening.host] elif isinstance(connection, TCPServer): - del self.listeningSockets[state.Peer( + del self.listeningSockets[Peer( connection.destination.host, connection.destination.port)] elif connection.isOutbound: try: @@ -259,7 +286,7 @@ class BMConnectionPool(object): for i in range( state.maximumNumberOfHalfOpenConnections - pending): try: - chosen = chooseConnection( + chosen = self.trustedPeer or chooseConnection( helper_random.randomchoice(self.streams)) except ValueError: continue diff --git a/src/network/node.py b/src/network/node.py index 0bfda653..4c532b81 100644 --- a/src/network/node.py +++ b/src/network/node.py @@ -1,7 +1,7 @@ """ -src/network/node.py -=================== +Named tuples representing the network peers """ import collections +Peer = collections.namedtuple('Peer', ['host', 'port']) Node = collections.namedtuple('Node', ['services', 'host', 'port']) diff --git a/src/network/proxy.py b/src/network/proxy.py index e65ac6a7..e0bb5e78 100644 --- a/src/network/proxy.py +++ b/src/network/proxy.py @@ -8,9 +8,9 @@ import socket import time import asyncore_pollchoose as asyncore -import state from advanceddispatcher import AdvancedDispatcher from bmconfigparser import BMConfigParser +from node import Peer logger = logging.getLogger('default') @@ -90,9 +90,10 @@ class Proxy(AdvancedDispatcher): def onion_proxy(self, address): """Set onion proxy address""" if address is not None and ( - not isinstance(address, tuple) or len(address) < 2 or - not isinstance(address[0], str) or - not isinstance(address[1], 
int)): + not isinstance(address, tuple) or len(address) < 2 + or not isinstance(address[0], str) + or not isinstance(address[1], int) + ): raise ValueError self.__class__._onion_proxy = address @@ -107,7 +108,7 @@ class Proxy(AdvancedDispatcher): self.__class__._onion_auth = authTuple def __init__(self, address): - if not isinstance(address, state.Peer): + if not isinstance(address, Peer): raise ValueError AdvancedDispatcher.__init__(self) self.destination = address diff --git a/src/network/socks5.py b/src/network/socks5.py index e0cb7202..f0241744 100644 --- a/src/network/socks5.py +++ b/src/network/socks5.py @@ -8,7 +8,7 @@ src/network/socks5.py import socket import struct -import state +from node import Peer from proxy import GeneralProxyError, Proxy, ProxyError @@ -200,7 +200,7 @@ class Socks5Resolver(Socks5): def __init__(self, host): self.host = host self.port = 8444 - Socks5.__init__(self, address=state.Peer(self.host, self.port)) + Socks5.__init__(self, address=Peer(self.host, self.port)) def state_auth_done(self): """Perform resolving""" diff --git a/src/network/tcp.py b/src/network/tcp.py index a1691ceb..97b00784 100644 --- a/src/network/tcp.py +++ b/src/network/tcp.py @@ -28,6 +28,7 @@ from network.objectracker import ObjectTracker from network.socks4a import Socks4aConnection from network.socks5 import Socks5Connection from network.tls import TLSDispatcher +from node import Peer from queues import UISignalQueue, invQueue, receiveDataQueue logger = logging.getLogger('default') @@ -49,7 +50,7 @@ class TCPConnection(BMProto, TLSDispatcher): self.connectedAt = 0 self.skipUntil = 0 if address is None and sock is not None: - self.destination = state.Peer(*sock.getpeername()) + self.destination = Peer(*sock.getpeername()) self.isOutbound = False TLSDispatcher.__init__(self, sock, server_side=True) self.connectedAt = time.time() @@ -334,7 +335,7 @@ def bootstrap(connection_class): _connection_base = connection_class def __init__(self, host, port): - self._connection_base.__init__(self, state.Peer(host, port)) + self._connection_base.__init__(self, Peer(host, port)) self.close_reason = self._succeed = False def bm_command_addr(self): @@ -384,7 +385,7 @@ class TCPServer(AdvancedDispatcher): 'bitmessagesettings', 'port', str(port)) BMConfigParser().save() break - self.destination = state.Peer(host, port) + self.destination = Peer(host, port) self.bound = True self.listen(5) @@ -402,7 +403,7 @@ class TCPServer(AdvancedDispatcher): except (TypeError, IndexError): return - state.ownAddresses[state.Peer(*sock.getsockname())] = True + state.ownAddresses[Peer(*sock.getsockname())] = True if ( len(connectionpool.BMConnectionPool().inboundConnections) + len(connectionpool.BMConnectionPool().outboundConnections) > diff --git a/src/network/udp.py b/src/network/udp.py index 97c6aee5..cf694567 100644 --- a/src/network/udp.py +++ b/src/network/udp.py @@ -9,6 +9,7 @@ import socket import state import protocol from bmproto import BMProto +from node import Peer from objectracker import ObjectTracker from queues import receiveDataQueue @@ -43,8 +44,8 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes else: self.socket = sock self.set_socket_reuse() - self.listening = state.Peer(*self.socket.getsockname()) - self.destination = state.Peer(*self.socket.getsockname()) + self.listening = Peer(*self.socket.getsockname()) + self.destination = Peer(*self.socket.getsockname()) ObjectTracker.__init__(self) self.connecting = False self.connected = True @@ -96,7 +97,7 @@ class 
UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes self.destination.host, self.destination.port, remoteport) if self.local: state.discoveredPeers[ - state.Peer(self.destination.host, remoteport) + Peer(self.destination.host, remoteport) ] = time.time() return True @@ -131,7 +132,7 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes logger.error("socket error: %s", e) return - self.destination = state.Peer(*addr) + self.destination = Peer(*addr) encodedAddr = protocol.encodeHost(addr[0]) self.local = bool(protocol.checkIPAddress(encodedAddr, True)) # overwrite the old buffer to avoid mixing data and so that diff --git a/src/state.py b/src/state.py index a3b930ab..f5526029 100644 --- a/src/state.py +++ b/src/state.py @@ -1,7 +1,6 @@ """ Global runtime variables. """ -import collections neededPubkeys = {} streamsInWhichIAmParticipating = [] @@ -47,24 +46,8 @@ uploadThread = None ownAddresses = {} -trustedPeer = None -""" - If the trustedpeer option is specified in keys.dat then this will - contain a Peer which will be connected to instead of using the - addresses advertised by other peers. The client will only connect to - this peer and the timing attack mitigation will be disabled in order - to download data faster. The expected use case is where the user has - a fast connection to a trusted server where they run a BitMessage - daemon permanently. If they then run a second instance of the client - on a local machine periodically when they want to check for messages - it will sync with the network a lot faster without compromising - security. -""" - discoveredPeers = {} -Peer = collections.namedtuple('Peer', ['host', 'port']) - dandelion = 0 testmode = False diff --git a/src/tests/core.py b/src/tests/core.py index 8d24a768..3871946d 100644 --- a/src/tests/core.py +++ b/src/tests/core.py @@ -17,6 +17,7 @@ from bmconfigparser import BMConfigParser from helper_msgcoding import MsgEncode, MsgDecode from network import asyncore_pollchoose as asyncore from network.connectionpool import BMConnectionPool +from network.node import Peer from network.tcp import Socks4aBMConnection, Socks5BMConnection, TCPConnection from queues import excQueue @@ -30,7 +31,7 @@ def pickle_knownnodes(): with open(knownnodes_file, 'wb') as dst: pickle.dump({ stream: { - state.Peer( + Peer( '%i.%i.%i.%i' % tuple([ random.randint(1, 255) for i in range(4)]), 8444): {'lastseen': now, 'rating': 0.1} @@ -90,7 +91,7 @@ class TestCore(unittest.TestCase): """initial fill script from network.tcp""" BMConfigParser().set('bitmessagesettings', 'dontconnect', 'true') try: - for peer in (state.Peer("127.0.0.1", 8448),): + for peer in (Peer("127.0.0.1", 8448),): direct = TCPConnection(peer) while asyncore.socket_map: print("loop, state = %s" % direct.state) @@ -147,7 +148,7 @@ class TestCore(unittest.TestCase): def _initiate_bootstrap(self): BMConfigParser().set('bitmessagesettings', 'dontconnect', 'true') self._outdate_knownnodes() - knownnodes.addKnownNode(1, state.Peer('127.0.0.1', 8444), is_self=True) + knownnodes.addKnownNode(1, Peer('127.0.0.1', 8444), is_self=True) knownnodes.cleanupKnownNodes() time.sleep(2) diff --git a/src/upnp.py b/src/upnp.py index 979b4186..99000413 100644 --- a/src/upnp.py +++ b/src/upnp.py @@ -1,9 +1,6 @@ # pylint: disable=too-many-statements,too-many-branches,protected-access,no-self-use """ -src/upnp.py -=========== - -A simple upnp module to forward port for BitMessage +Complete UPnP port forwarding implementation in separate thread. 
Reference: http://mattscodecave.com/posts/using-python-and-upnp-to-forward-a-port """ @@ -22,6 +19,7 @@ import tr from bmconfigparser import BMConfigParser from debug import logger from network import BMConnectionPool, StoppableThread +from network.node import Peer def createRequestXML(service, action, arguments=None): @@ -262,7 +260,7 @@ class uPnPThread(StoppableThread): self.routers.append(newRouter) self.createPortMapping(newRouter) try: - self_peer = state.Peer( + self_peer = Peer( newRouter.GetExternalIPAddress(), self.extPort ) From c40c70f8073ca6fef9b48290da13de7c873a193f Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Mon, 4 Nov 2019 14:29:57 +0200 Subject: [PATCH 40/70] Marked variables comments in defaults for use in doc. Allowed autodoc in bitmessagemain, class_objectProcessor, defaults: seems safe now. Changed docs conf: don't sort module members, treat any string inside backticks as :obj:. --- docs/conf.py | 9 +++++---- src/defaults.py | 22 +++++++++++----------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index f9283f38..3464e056 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -49,6 +49,8 @@ extensions = [ 'm2r', ] +default_role = 'obj' + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -199,7 +201,6 @@ epub_exclude_files = ['search.html'] autodoc_mock_imports = [ 'debug', 'pybitmessage.bitmessagekivy', - 'pybitmessage.bitmessagemain', 'pybitmessage.bitmessageqt.addressvalidator', 'pybitmessage.helper_startup', 'pybitmessage.network.httpd', @@ -219,15 +220,15 @@ autodoc_mock_imports = [ 'qrcode', 'stem', ] +autodoc_member_order = 'bysource' # Apidoc settings apidoc_module_dir = '../pybitmessage' apidoc_output_dir = 'autodoc' apidoc_excluded_paths = [ - 'bitmessagekivy', 'bitmessagemain.py', 'build_osx.py', + 'bitmessagekivy', 'build_osx.py', 'bitmessageqt/addressvalidator.py', 'bitmessageqt/migrationwizard.py', - 'bitmessageqt/newaddresswizard.py', - 'class_objectProcessor.py', 'defaults.py', 'helper_startup.py', + 'bitmessageqt/newaddresswizard.py', 'helper_startup.py', 'kivymd', 'main.py', 'navigationdrawer', 'network/http*', 'pybitmessage', 'tests', 'version.py' ] diff --git a/src/defaults.py b/src/defaults.py index d10f9000..32162b56 100644 --- a/src/defaults.py +++ b/src/defaults.py @@ -1,24 +1,24 @@ """ -src/defaults.py -=============== +Common default values """ -# sanity check, prevent doing ridiculous PoW -# 20 million PoWs equals approximately 2 days on dev's dual R9 290 +#: sanity check, prevent doing ridiculous PoW +#: 20 million PoWs equals approximately 2 days on dev's dual R9 290 ridiculousDifficulty = 20000000 -# Remember here the RPC port read from namecoin.conf so we can restore to -# it as default whenever the user changes the "method" selection for -# namecoin integration to "namecoind". +#: Remember here the RPC port read from namecoin.conf so we can restore to +#: it as default whenever the user changes the "method" selection for +#: namecoin integration to "namecoind". namecoinDefaultRpcPort = "8336" # If changed, these values will cause particularly unexpected behavior: # You won't be able to either send or receive messages because the proof # of work you do (or demand) won't match that done or demanded by others. # Don't change them! -# The amount of work that should be performed (and demanded) per byte of the payload. +#: The amount of work that should be performed (and demanded) per byte +#: of the payload. 
 networkDefaultProofOfWorkNonceTrialsPerByte = 1000
-# To make sending short messages a little more difficult, this value is
-# added to the payload length for use in calculating the proof of work
-# target.
+#: To make sending short messages a little more difficult, this value is
+#: added to the payload length for use in calculating the proof of work
+#: target.
 networkDefaultPayloadLengthExtraBytes = 1000

From 35a29625526a5485d48a25fe8ad0f6264dd3cc2f Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Wed, 6 Nov 2019 11:38:42 +0200
Subject: [PATCH 41/70] Fixed misleading comment about receiveDataThreads in queues

---
 src/queues.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/queues.py b/src/queues.py
index d0ac77d0..7d9e284a 100644
--- a/src/queues.py
+++ b/src/queues.py
@@ -37,8 +37,8 @@ class ObjectProcessorQueue(Queue.Queue):
 workerQueue = Queue.Queue()
 UISignalQueue = Queue.Queue()
 addressGeneratorQueue = Queue.Queue()
-#: receiveDataThreads dump objects they hear on the network into this
-#: queue to be processed.
+#: `.network.ReceiveQueueThread` instances dump objects they hear
+#: on the network into this queue to be processed.
 objectProcessorQueue = ObjectProcessorQueue()
 invQueue = MultiQueue()
 addrQueue = MultiQueue()

From 7e1f1d2604c333ccc2eb548c07f9cf19051b6592 Mon Sep 17 00:00:00 2001
From: bug Lady
Date: Thu, 14 Nov 2019 13:32:15 +0100
Subject: [PATCH 42/70] Fix test config: use the string 'true', not the boolean True

---
 src/tests/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tests/core.py b/src/tests/core.py
index 3871946d..d2456064 100644
--- a/src/tests/core.py
+++ b/src/tests/core.py
@@ -176,7 +176,7 @@ class TestCore(unittest.TestCase):
 
     def test_onionservicesonly(self):
         """test onionservicesonly networking mode"""
-        BMConfigParser().set('bitmessagesettings', 'onionservicesonly', True)
+        BMConfigParser().set('bitmessagesettings', 'onionservicesonly', 'true')
         self._initiate_bootstrap()
         BMConfigParser().remove_option('bitmessagesettings', 'dontconnect')
         for _ in range(360):

From 2a165380bb7214afdcfd95b74dce83660bad53ff Mon Sep 17 00:00:00 2001
From: Peter Surda
Date: Sat, 16 Nov 2019 11:52:36 +0100
Subject: [PATCH 43/70] Restrict outbound connections on network groups

Logic borrowed from bitcoin, see CNetAddr::GetGroup() in
src/netaddress.cpp. Simplified, so it may not work fully identically,
but for our purposes it's good enough.

Won't connect to more than one host from a /16 subnet on IPv4 and a
/32 subnet on IPv6.
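
A rough illustration of the grouping this introduces, doctest-style (the
values mirror the new src/tests/test_networkgroup.py added below; only the
network_group() function added to src/protocol.py by this patch is assumed):

    >>> from pybitmessage.protocol import network_group
    >>> network_group('1.2.3.4')    # routable IPv4: first 2 bytes (/16)
    '\x01\x02'
    >>> network_group('127.0.0.1')  # unroutable: grouped by network type
    'IPv4'
    >>> network_group('quzwelsuziwqgpt2.onion')  # each hostname/onion is its own group
    'quzwelsuziwqgpt2.onion'

For routable IPv6 the first 12 of the 16 packed bytes are kept, as the
test module below confirms.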
---
 src/network/bmproto.py         |  2 ++
 src/network/connectionpool.py  | 14 ++++++++++++
 src/network/tcp.py             |  1 +
 src/protocol.py                | 28 ++++++++++++++++++++++++
 src/tests/test_networkgroup.py | 39 ++++++++++++++++++++++++++++++++++
 5 files changed, 84 insertions(+)
 create mode 100644 src/tests/test_networkgroup.py

diff --git a/src/network/bmproto.py b/src/network/bmproto.py
index bf0b5742..11e96fd6 100644
--- a/src/network/bmproto.py
+++ b/src/network/bmproto.py
@@ -71,6 +71,8 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
         # packet/connection from a local IP
         self.local = False
         self.pendingUpload = RandomTrackingDict()
+        # canonical identifier of network group
+        self.network_group = None
 
     def bm_proto_reset(self):
         """Reset the bitmessage object parser"""
diff --git a/src/network/connectionpool.py b/src/network/connectionpool.py
index 654b74a1..6264191d 100644
--- a/src/network/connectionpool.py
+++ b/src/network/connectionpool.py
@@ -229,6 +229,7 @@ class BMConnectionPool(object):
 
     def loop(self):  # pylint: disable=too-many-branches,too-many-statements
         """Main Connectionpool's loop"""
+        # pylint: disable=too-many-locals
         # defaults to empty loop if outbound connections are maxed
         spawnConnections = False
         acceptConnections = True
@@ -297,6 +298,19 @@ class BMConnectionPool(object):
                     # don't connect to self
                     if chosen in state.ownAddresses:
                         continue
+                    # don't connect to the hosts from the same
+                    # network group, defense against Sybil attacks
+                    host_network_group = protocol.network_group(
+                        chosen.host)
+                    same_group = False
+                    for j in self.outboundConnections.values():
+                        if host_network_group == j.network_group:
+                            same_group = True
+                            if chosen.host == j.destination.host:
+                                knownnodes.decreaseRating(chosen)
+                            break
+                    if same_group:
+                        continue
 
                 try:
                     if chosen.host.endswith(".onion") and Proxy.onion_proxy:
diff --git a/src/network/tcp.py b/src/network/tcp.py
index 97b00784..31d20dea 100644
--- a/src/network/tcp.py
+++ b/src/network/tcp.py
@@ -84,6 +84,7 @@ class TCPConnection(BMProto, TLSDispatcher):
             )
         except socket.error:
             pass  # it's probably a hostname
+        self.network_group = protocol.network_group(self.destination.host)
         ObjectTracker.__init__(self)  # pylint: disable=non-parent-init-called
         self.bm_proto_reset()
         self.set_state("bm_header", expectBytes=protocol.Header.size)
diff --git a/src/protocol.py b/src/protocol.py
index ef101a72..ec8fc9dd 100644
--- a/src/protocol.py
+++ b/src/protocol.py
@@ -105,6 +105,34 @@ def networkType(host):
     return 'IPv6'
 
 
+def network_group(host):
+    """Canonical identifier of network group
+    simplified, borrowed from
+    GetGroup() in src/netaddress.cpp in Bitcoin Core"""
+    if not isinstance(host, str):
+        return None
+    network_type = networkType(host)
+    try:
+        raw_host = encodeHost(host)
+    except socket.error:
+        return host
+    if network_type == 'IPv4':
+        decoded_host = checkIPv4Address(raw_host[12:], True)
+        if decoded_host:
+            # /16 subnet
+            return raw_host[12:14]
+    elif network_type == 'IPv6':
+        decoded_host = checkIPv6Address(raw_host, True)
+        if decoded_host:
+            # /32 subnet
+            return raw_host[0:12]
+    else:
+        # just host, e.g.
for tor
+        return host
+    # global network type group for local, private, unroutable
+    return network_type
+
+
 def checkIPAddress(host, private=False):
     """Returns hostStandardFormat if it is a valid IP address, otherwise returns False"""
     if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
diff --git a/src/tests/test_networkgroup.py b/src/tests/test_networkgroup.py
new file mode 100644
index 00000000..76cfb033
--- /dev/null
+++ b/src/tests/test_networkgroup.py
@@ -0,0 +1,39 @@
+"""
+Test for network group
+"""
+import unittest
+
+
+class TestNetworkGroup(unittest.TestCase):
+    """
+    Test case for network group
+    """
+    def test_network_group(self):
+        """Test various types of network groups"""
+        from pybitmessage.protocol import network_group
+
+        test_ip = '1.2.3.4'
+        self.assertEqual('\x01\x02', network_group(test_ip))
+
+        test_ip = '127.0.0.1'
+        self.assertEqual('IPv4', network_group(test_ip))
+
+        test_ip = '0102:0304:0506:0708:090A:0B0C:0D0E:0F10'
+        self.assertEqual(
+            '\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C',
+            network_group(test_ip))
+
+        test_ip = 'bootstrap8444.bitmessage.org'
+        self.assertEqual(
+            'bootstrap8444.bitmessage.org',
+            network_group(test_ip))
+
+        test_ip = 'quzwelsuziwqgpt2.onion'
+        self.assertEqual(
+            test_ip,
+            network_group(test_ip))
+
+        test_ip = None
+        self.assertEqual(
+            None,
+            network_group(test_ip))

From f18f534c48fe0fb8dbc91d59779f2451d051fd66 Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Mon, 28 Oct 2019 17:49:57 +0200
Subject: [PATCH 44/70] Formatted protocol and its docstrings

---
 src/protocol.py | 139 ++++++++++++++++++++++++++++++------------------
 1 file changed, 88 insertions(+), 51 deletions(-)

diff --git a/src/protocol.py b/src/protocol.py
index ec8fc9dd..cdd50dce 100644
--- a/src/protocol.py
+++ b/src/protocol.py
@@ -1,7 +1,8 @@
-# pylint: disable=too-many-boolean-expressions,too-many-return-statements,too-many-locals,too-many-statements
 """
 Low-level protocol-related functions.
 """
+# pylint: disable=too-many-boolean-expressions,too-many-return-statements
+# pylint: disable=too-many-locals,too-many-statements
 
 import base64
 import hashlib
@@ -9,7 +10,6 @@ import random
 import socket
 import sys
 import time
-import traceback
 from binascii import hexlify
 from struct import pack, unpack, Struct
 
@@ -24,10 +24,18 @@ from fallback import RIPEMD160Hash
 from helper_sql import sqlExecute
 from version import softwareVersion
 
-
 # Service flags
+#: This is a normal network node
 NODE_NETWORK = 1
+#: This node supports SSL/TLS in the current connection (python < 2.7.9
+#: only supports an SSL client, so in that case it would only have this
+#: on when the connection is a client).
 NODE_SSL = 2
+# (Proposal) This node may do PoW on behalf of some of its peers
+# (PoW offloading/delegating), but it doesn't have to. Clients may have
+# to meet additional requirements (e.g.
TLS authentication) +# NODE_POW = 4 +#: Node supports dandelion NODE_DANDELION = 8 # Bitfield flags @@ -89,7 +97,8 @@ def isBitSetWithinBitfield(fourByteString, n): def encodeHost(host): """Encode a given host to be used in low-level socket operations""" if host.find('.onion') > -1: - return '\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode(host.split(".")[0], True) + return '\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode( + host.split(".")[0], True) elif host.find(':') == -1: return '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + \ socket.inet_aton(host) @@ -134,7 +143,10 @@ def network_group(host): def checkIPAddress(host, private=False): - """Returns hostStandardFormat if it is a valid IP address, otherwise returns False""" + """ + Returns hostStandardFormat if it is a valid IP address, + otherwise returns False + """ if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF': hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:]) return checkIPv4Address(host[12:], hostStandardFormat, private) @@ -150,35 +162,46 @@ def checkIPAddress(host, private=False): except ValueError: return False if hostStandardFormat == "": - # This can happen on Windows systems which are not 64-bit compatible - # so let us drop the IPv6 address. + # This can happen on Windows systems which are + # not 64-bit compatible so let us drop the IPv6 address. return False return checkIPv6Address(host, hostStandardFormat, private) def checkIPv4Address(host, hostStandardFormat, private=False): - """Returns hostStandardFormat if it is an IPv4 address, otherwise returns False""" + """ + Returns hostStandardFormat if it is an IPv4 address, + otherwise returns False + """ if host[0] == '\x7F': # 127/8 if not private: - logger.debug('Ignoring IP address in loopback range: %s', hostStandardFormat) + logger.debug( + 'Ignoring IP address in loopback range: %s', + hostStandardFormat) return hostStandardFormat if private else False if host[0] == '\x0A': # 10/8 if not private: - logger.debug('Ignoring IP address in private range: %s', hostStandardFormat) + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) return hostStandardFormat if private else False if host[0:2] == '\xC0\xA8': # 192.168/16 if not private: - logger.debug('Ignoring IP address in private range: %s', hostStandardFormat) + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) return hostStandardFormat if private else False if host[0:2] >= '\xAC\x10' and host[0:2] < '\xAC\x20': # 172.16/12 if not private: - logger.debug('Ignoring IP address in private range: %s', hostStandardFormat) + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) return hostStandardFormat if private else False return False if private else hostStandardFormat def checkIPv6Address(host, hostStandardFormat, private=False): - """Returns hostStandardFormat if it is an IPv6 address, otherwise returns False""" + """ + Returns hostStandardFormat if it is an IPv6 address, + otherwise returns False + """ if host == ('\x00' * 15) + '\x01': if not private: logger.debug('Ignoring loopback address: %s', hostStandardFormat) @@ -189,7 +212,8 @@ def checkIPv6Address(host, hostStandardFormat, private=False): return hostStandardFormat if private else False if (ord(host[0]) & 0xfe) == 0xfc: if not private: - logger.debug('Ignoring unique local address: %s', hostStandardFormat) + logger.debug( + 'Ignoring unique local address: %s', hostStandardFormat) return hostStandardFormat if private else False return False if 
private else hostStandardFormat @@ -210,31 +234,29 @@ def haveSSL(server=False): def checkSocksIP(host): """Predicate to check if we're using a SOCKS proxy""" + sockshostname = BMConfigParser().safeGet( + 'bitmessagesettings', 'sockshostname') try: - if state.socksIP is None or not state.socksIP: - state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname")) - # uninitialised - except NameError: - state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname")) - # resolving failure - except socket.gaierror: - state.socksIP = BMConfigParser().get("bitmessagesettings", "sockshostname") + if not state.socksIP: + state.socksIP = socket.gethostbyname(sockshostname) + except NameError: # uninitialised + state.socksIP = socket.gethostbyname(sockshostname) + except (TypeError, socket.gaierror): # None, resolving failure + state.socksIP = sockshostname return state.socksIP == host -def isProofOfWorkSufficient(data, - nonceTrialsPerByte=0, - payloadLengthExtraBytes=0, - recvTime=0): +def isProofOfWorkSufficient( + data, nonceTrialsPerByte=0, payloadLengthExtraBytes=0, recvTime=0): """ - Validate an object's Proof of Work using method described in: - https://bitmessage.org/wiki/Proof_of_work + Validate an object's Proof of Work using method described + `here `_ Arguments: - int nonceTrialsPerByte (default: from default.py) - int payloadLengthExtraBytes (default: from default.py) + int nonceTrialsPerByte (default: from `.defaults`) + int payloadLengthExtraBytes (default: from `.defaults`) float recvTime (optional) UNIX epoch time when object was - received from the network (default: current system time) + received from the network (default: current system time) Returns: True if PoW valid and sufficient, False in all other cases """ @@ -246,18 +268,20 @@ def isProofOfWorkSufficient(data, TTL = endOfLifeTime - (int(recvTime) if recvTime else int(time.time())) if TTL < 300: TTL = 300 - POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[ - :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8]) - return POW <= 2 ** 64 / (nonceTrialsPerByte * - (len(data) + payloadLengthExtraBytes + - ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16)))) + POW, = unpack('>Q', hashlib.sha512(hashlib.sha512( + data[:8] + hashlib.sha512(data[8:]).digest() + ).digest()).digest()[0:8]) + return POW <= 2 ** 64 / ( + nonceTrialsPerByte * ( + len(data) + payloadLengthExtraBytes + + ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16)))) # Packet creation def CreatePacket(command, payload=''): - """Construct and return a number of bytes from a payload""" + """Construct and return a packet""" payload_length = len(payload) checksum = hashlib.sha512(payload).digest()[0:4] @@ -267,8 +291,13 @@ def CreatePacket(command, payload=''): return bytes(b) -def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=False, nodeid=None): - """Construct the payload of a version message, return the resultng bytes of running CreatePacket() on it""" +def assembleVersionMessage( + remoteHost, remotePort, participatingStreams, server=False, nodeid=None +): + """ + Construct the payload of a version message, + return the resulting bytes of running `CreatePacket` on it + """ payload = '' payload += pack('>L', 3) # protocol version. # bitflags of the services I offer. 
@@ -280,9 +309,10 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server= ) payload += pack('>q', int(time.time())) - payload += pack( - '>q', 1) # boolservices of remote connection; ignored by the remote host. - if checkSocksIP(remoteHost) and server: # prevent leaking of tor outbound IP + # boolservices of remote connection; ignored by the remote host. + payload += pack('>q', 1) + if checkSocksIP(remoteHost) and server: + # prevent leaking of tor outbound IP payload += encodeHost('127.0.0.1') payload += pack('>H', 8444) else: @@ -301,21 +331,25 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server= (NODE_SSL if haveSSL(server) else 0) | (NODE_DANDELION if state.dandelion else 0) ) - # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used. - payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L', 2130706433) + # = 127.0.0.1. This will be ignored by the remote host. + # The actual remote connected IP will be used. + payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack( + '>L', 2130706433) # we have a separate extPort and incoming over clearnet # or outgoing through clearnet extport = BMConfigParser().safeGetInt('bitmessagesettings', 'extport') if ( extport and ((server and not checkSocksIP(remoteHost)) or ( - BMConfigParser().get('bitmessagesettings', 'socksproxytype') == - 'none' and not server)) + BMConfigParser().get('bitmessagesettings', 'socksproxytype') + == 'none' and not server)) ): payload += pack('>H', extport) elif checkSocksIP(remoteHost) and server: # incoming connection over Tor - payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport')) + payload += pack( + '>H', BMConfigParser().getint('bitmessagesettings', 'onionport')) else: # no extport and not incoming over Tor - payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'port')) + payload += pack( + '>H', BMConfigParser().getint('bitmessagesettings', 'port')) if nodeid is not None: payload += nodeid[0:8] @@ -339,7 +373,10 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server= def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''): - """Construct the payload of an error message, return the resultng bytes of running CreatePacket() on it""" + """ + Construct the payload of an error message, + return the resulting bytes of running `CreatePacket` on it + """ payload = encodeVarint(fatal) payload += encodeVarint(banTime) payload += encodeVarint(len(inventoryVector)) @@ -476,7 +513,7 @@ def decryptAndCheckPubkeyPayload(data, address): except Exception: logger.critical( 'Pubkey decryption was UNsuccessful because of' - ' an unhandled exception! This is definitely a bug! \n%s', - traceback.format_exc() + ' an unhandled exception! 
This is definitely a bug!', + exc_info=True ) return 'failed' From aa7e7dd6583d780e7520330ab3e3fb1e9ee357ff Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Mon, 4 Nov 2019 12:27:19 +0200 Subject: [PATCH 45/70] Fixed some docstrings in shared and state --- src/shared.py | 25 ++++++++++++++++++------- src/state.py | 8 ++++---- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/src/shared.py b/src/shared.py index 1a2add28..90cea89d 100644 --- a/src/shared.py +++ b/src/shared.py @@ -80,7 +80,9 @@ def isAddressInMySubscriptionsList(address): def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address): - """Am I subscribed to this address, is it in my addressbook or whitelist?""" + """ + Am I subscribed to this address, is it in my addressbook or whitelist? + """ if isAddressInMyAddressBook(address): return True @@ -100,8 +102,12 @@ def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address): return False -def decodeWalletImportFormat(WIFstring): # pylint: disable=inconsistent-return-statements - """Convert private key from base58 that's used in the config file to 8-bit binary string""" +def decodeWalletImportFormat(WIFstring): + # pylint: disable=inconsistent-return-statements + """ + Convert private key from base58 that's used in the config file to + 8-bit binary string + """ fullString = arithmetic.changebase(WIFstring, 58, 256) privkey = fullString[:-4] if fullString[-4:] != \ @@ -126,14 +132,15 @@ def decodeWalletImportFormat(WIFstring): # pylint: disable=inconsistent-retur def reloadMyAddressHashes(): - """Reinitialise runtime data (e.g. encryption objects, address hashes) from the config file""" + """Reload keys for user's addresses from the config file""" logger.debug('reloading keys from keys.dat file') myECCryptorObjects.clear() myAddressesByHash.clear() myAddressesByTag.clear() # myPrivateKeys.clear() - keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat') + keyfileSecure = checkSensitiveFilePermissions(os.path.join( + state.appdata, 'keys.dat')) hasEnabledKeys = False for addressInKeysFile in BMConfigParser().addresses(): isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled') @@ -162,11 +169,15 @@ def reloadMyAddressHashes(): ) if not keyfileSecure: - fixSensitiveFilePermissions(state.appdata + 'keys.dat', hasEnabledKeys) + fixSensitiveFilePermissions(os.path.join( + state.appdata, 'keys.dat'), hasEnabledKeys) def reloadBroadcastSendersForWhichImWatching(): - """Reinitialise runtime data for the broadcasts I'm subscribed to from the config file""" + """ + Reinitialize runtime data for the broadcasts I'm subscribed to + from the config file + """ broadcastSendersForWhichImWatching.clear() MyECSubscriptionCryptorObjects.clear() queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1') diff --git a/src/state.py b/src/state.py index f5526029..58e1106a 100644 --- a/src/state.py +++ b/src/state.py @@ -16,8 +16,8 @@ appdata = '' shutdown = 0 """ - Set to 1 by the doCleanShutdown function. - Used to tell the proof of work worker threads to exit. +Set to 1 by the `.shutdown.doCleanShutdown` function. +Used to tell the threads to exit. 
""" # Component control flags - set on startup, do not change during runtime @@ -25,7 +25,7 @@ shutdown = 0 enableNetwork = True """enable network threads""" enableObjProc = True -"""enable object processing threads""" +"""enable object processing thread""" enableAPI = True """enable API (if configured)""" enableGUI = True @@ -35,7 +35,7 @@ enableSTDIO = False curses = False sqlReady = False -"""set to true by sqlTread when ready for processing""" +"""set to true by `.threads.sqlThread` when ready for processing""" maximumNumberOfHalfOpenConnections = 0 From a7da0c0eff1dcbea9b47124d3553f26f1d6f1c7b Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Mon, 4 Nov 2019 12:42:52 +0200 Subject: [PATCH 46/70] Fixed google style docstrings in addresses --- src/addresses.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/addresses.py b/src/addresses.py index b83f3f6e..bb0c9ec5 100644 --- a/src/addresses.py +++ b/src/addresses.py @@ -1,7 +1,5 @@ """ -src/addresses.py -================ - +Operations with addresses """ # pylint: disable=redefined-outer-name,inconsistent-return-statements @@ -18,8 +16,9 @@ ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" def encodeBase58(num, alphabet=ALPHABET): """Encode a number in Base X - `num`: The number to encode - `alphabet`: The alphabet to use for encoding + Args: + num: The number to encode + alphabet: The alphabet to use for encoding """ if num == 0: return alphabet[0] @@ -27,7 +26,6 @@ def encodeBase58(num, alphabet=ALPHABET): base = len(alphabet) while num: rem = num % base - # print 'num is:', num num = num // base arr.append(alphabet[rem]) arr.reverse() @@ -37,9 +35,9 @@ def encodeBase58(num, alphabet=ALPHABET): def decodeBase58(string, alphabet=ALPHABET): """Decode a Base X encoded string into the number - Arguments: - - `string`: The encoded string - - `alphabet`: The alphabet to use for encoding + Args: + string: The encoded string + alphabet: The alphabet to use for encoding """ base = len(alphabet) num = 0 From d9fa6a94f472432df48625e1a61be8c973227414 Mon Sep 17 00:00:00 2001 From: Dmitri Bogomolov <4glitch@gmail.com> Date: Mon, 4 Nov 2019 13:30:11 +0200 Subject: [PATCH 47/70] More docstrings and formatting fixes in highlevelcrypto and shutdown --- src/highlevelcrypto.py | 48 +++++++++++++++++++++++++----------------- src/shutdown.py | 12 +++++++---- 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/highlevelcrypto.py b/src/highlevelcrypto.py index 3d894ae8..f392fe4a 100644 --- a/src/highlevelcrypto.py +++ b/src/highlevelcrypto.py @@ -1,6 +1,10 @@ """ -src/highlevelcrypto.py -====================== +High level cryptographic functions based on `.pyelliptic` OpenSSL bindings. + +.. note:: + Upstream pyelliptic was upgraded from SHA1 to SHA256 for signing. + We must upgrade PyBitmessage gracefully. + `More discussion. 
`_ """ from binascii import hexlify @@ -12,12 +16,13 @@ from pyelliptic import arithmetic as a def makeCryptor(privkey): - """Return a private pyelliptic.ECC() instance""" + """Return a private `.pyelliptic.ECC` instance""" private_key = a.changebase(privkey, 16, 256, minlen=32) public_key = pointMult(private_key) privkey_bin = '\x02\xca\x00\x20' + private_key pubkey_bin = '\x02\xca\x00\x20' + public_key[1:-32] + '\x00\x20' + public_key[-32:] - cryptor = pyelliptic.ECC(curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin) + cryptor = pyelliptic.ECC( + curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin) return cryptor @@ -29,7 +34,7 @@ def hexToPubkey(pubkey): def makePubCryptor(pubkey): - """Return a public pyelliptic.ECC() instance""" + """Return a public `.pyelliptic.ECC` instance""" pubkey_bin = hexToPubkey(pubkey) return pyelliptic.ECC(curve='secp256k1', pubkey=pubkey_bin) @@ -43,7 +48,8 @@ def privToPub(privkey): def encrypt(msg, hexPubkey): """Encrypts message with hex public key""" - return pyelliptic.ECC(curve='secp256k1').encrypt(msg, hexToPubkey(hexPubkey)) + return pyelliptic.ECC(curve='secp256k1').encrypt( + msg, hexToPubkey(hexPubkey)) def decrypt(msg, hexPrivkey): @@ -52,36 +58,38 @@ def decrypt(msg, hexPrivkey): def decryptFast(msg, cryptor): - """Decrypts message with an existing pyelliptic.ECC.ECC object""" + """Decrypts message with an existing `.pyelliptic.ECC` object""" return cryptor.decrypt(msg) def sign(msg, hexPrivkey): - """Signs with hex private key""" - # pyelliptic is upgrading from SHA1 to SHA256 for signing. We must - # upgrade PyBitmessage gracefully. - # https://github.com/yann2192/pyelliptic/pull/33 - # More discussion: https://github.com/yann2192/pyelliptic/issues/32 - digestAlg = BMConfigParser().safeGet('bitmessagesettings', 'digestalg', 'sha1') + """ + Signs with hex private key using SHA1 or SHA256 depending on + "digestalg" setting + """ + digestAlg = BMConfigParser().safeGet( + 'bitmessagesettings', 'digestalg', 'sha1') if digestAlg == "sha1": # SHA1, this will eventually be deprecated - return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.digest_ecdsa_sha1) + return makeCryptor(hexPrivkey).sign( + msg, digest_alg=OpenSSL.digest_ecdsa_sha1) elif digestAlg == "sha256": # SHA256. Eventually this will become the default return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.EVP_sha256) else: - raise ValueError("Unknown digest algorithm %s" % (digestAlg)) + raise ValueError("Unknown digest algorithm %s" % digestAlg) def verify(msg, sig, hexPubkey): - """Verifies with hex public key""" + """Verifies with hex public key using SHA1 or SHA256""" # As mentioned above, we must upgrade gracefully to use SHA256. So # let us check the signature using both SHA1 and SHA256 and if one # of them passes then we will be satisfied. Eventually this can # be simplified and we'll only check with SHA256. try: # old SHA1 algorithm. - sigVerifyPassed = makePubCryptor(hexPubkey).verify(sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1) + sigVerifyPassed = makePubCryptor(hexPubkey).verify( + sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1) except: sigVerifyPassed = False if sigVerifyPassed: @@ -89,7 +97,8 @@ def verify(msg, sig, hexPubkey): return True # The signature check using SHA1 failed. Let us try it with SHA256. 
     try:
-        return makePubCryptor(hexPubkey).verify(sig, msg, digest_alg=OpenSSL.EVP_sha256)
+        return makePubCryptor(hexPubkey).verify(
+            sig, msg, digest_alg=OpenSSL.EVP_sha256)
     except:
         return False
 
@@ -106,7 +115,8 @@ def pointMult(secret):
     """
     while True:
         try:
-            k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
+            k = OpenSSL.EC_KEY_new_by_curve_name(
+                OpenSSL.get_curve('secp256k1'))
             priv_key = OpenSSL.BN_bin2bn(secret, 32, None)
             group = OpenSSL.EC_KEY_get0_group(k)
             pub_key = OpenSSL.EC_POINT_new(group)
diff --git a/src/shutdown.py b/src/shutdown.py
index c81a519a..dbc2af04 100644
--- a/src/shutdown.py
+++ b/src/shutdown.py
@@ -16,7 +16,9 @@ from queues import (
 
 
 def doCleanShutdown():
-    """Used to tell proof of work worker threads and the objectProcessorThread to exit."""
+    """
+    Used to tell all the threads to finish work and exit.
+    """
     state.shutdown = 1
 
     objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
@@ -52,9 +54,11 @@ def doCleanShutdown():
         time.sleep(.25)
 
     for thread in threading.enumerate():
-        if (thread is not threading.currentThread() and
-                isinstance(thread, StoppableThread) and
-                thread.name != 'SQL'):
+        if (
+            thread is not threading.currentThread()
+            and isinstance(thread, StoppableThread)
+            and thread.name != 'SQL'
+        ):
             logger.debug("Waiting for thread %s", thread.name)
             thread.join()

From 49d731c47865d8895df7cbf0e08312bd8aba289e Mon Sep 17 00:00:00 2001
From: Dmitri Bogomolov <4glitch@gmail.com>
Date: Fri, 15 Nov 2019 14:10:18 +0200
Subject: [PATCH 48/70] .readthedocs.yml

---
 .readthedocs.yml | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 .readthedocs.yml

diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..474ae9ab
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,9 @@
+version: 2
+
+python:
+  version: 2.7
+  install:
+    - requirements: docs/requirements.txt
+    - method: setuptools
+      path: .
+  system_packages: true

From af52d95503cc579035aa8035c0ded2281dc8c5ea Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Mon, 4 Nov 2019 20:14:45 +0530
Subject: [PATCH 49/70] bitmessagemain quality fixes

---
 src/bitmessagemain.py | 49 ++++++++++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py
index c70eb0bf..176125cc 100755
--- a/src/bitmessagemain.py
+++ b/src/bitmessagemain.py
@@ -1,4 +1,7 @@
 #!/usr/bin/python2.7
+"""
+The PyBitmessage startup script
+"""
 # Copyright (c) 2012-2016 Jonathan Warren
 # Copyright (c) 2012-2019 The Bitmessage developers
 # Distributed under the MIT/X11 software license.
See the accompanying
@@ -53,6 +56,7 @@ from threads import (
 
 
 def connectToStream(streamNumber):
+    """Connect to a stream"""
     state.streamsInWhichIAmParticipating.append(streamNumber)
 
     if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():
@@ -85,6 +89,7 @@ def _fixSocket():
         addressToString = ctypes.windll.ws2_32.WSAAddressToStringA
 
         def inet_ntop(family, host):
+            """Convert an IP address from packed binary format to string format"""
             if family == socket.AF_INET:
                 if len(host) != 4:
                     raise ValueError("invalid IPv4 host")
@@ -106,6 +111,7 @@ def _fixSocket():
         stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
 
         def inet_pton(family, host):
+            """Convert an IP address from string format to packed binary format"""
            buf = "\0" * 28
            lengthBuf = pack("I", len(buf))
            if stringToAddress(str(host),
@@ -160,7 +166,8 @@ def signal_handler(signum, frame):
         ' because the UI captures the signal.')
 
 
-class Main:
+class Main(object):
+    """Main PyBitmessage class"""
     @staticmethod
     def start_proxyconfig(config):
         """Check socksproxytype and start any proxy configuration plugin"""
@@ -183,14 +190,15 @@ class Main:
             'Started proxy config plugin %s in %s sec',
             proxy_type, time.time() - proxyconfig_start)
 
-    def start(self):
+    def start(self):  # pylint: disable=too-many-statements, too-many-branches, too-many-locals
+        """Start main application"""
         _fixSocket()
         config = BMConfigParser()
         daemon = config.safeGetBoolean('bitmessagesettings', 'daemon')
 
         try:
-            opts, args = getopt.getopt(
+            opts, _ = getopt.getopt(
                 sys.argv[1:], "hcdt",
                 ["help", "curses", "daemon", "test"])
 
@@ -198,7 +206,7 @@ class Main:
             self.usage()
             sys.exit(2)
 
-        for opt, arg in opts:
+        for opt, _ in opts:
             if opt in ("-h", "--help"):
                 self.usage()
                 sys.exit()
@@ -412,7 +420,9 @@ class Main:
             else 0
         )
 
-    def daemonize(self):
+    @staticmethod
+    def daemonize():
+        """Run as a daemon: fork into the background and signal the parents to exit."""
         grandfatherPid = os.getpid()
         parentPid = None
         try:
@@ -422,7 +432,7 @@ class Main:
                 # wait until grandchild ready
                 while True:
                     time.sleep(1)
-                os._exit(0)
+                os._exit(0)  # pylint: disable=protected-access
         except AttributeError:
             # fork not implemented
             pass
@@ -443,7 +453,7 @@ class Main:
                 # wait until child ready
                 while True:
                     time.sleep(1)
-                os._exit(0)
+                os._exit(0)  # pylint: disable=protected-access
         except AttributeError:
             # fork not implemented
             pass
@@ -464,14 +474,18 @@ class Main:
             os.kill(parentPid, signal.SIGTERM)
             os.kill(grandfatherPid, signal.SIGTERM)
 
-    def setSignalHandler(self):
+    @staticmethod
+    def setSignalHandler():
+        """Set up the signal handlers"""
         signal.signal(signal.SIGINT, signal_handler)
         signal.signal(signal.SIGTERM, signal_handler)
         # signal.signal(signal.SIGINT, signal.SIG_DFL)
 
-    def usage(self):
-        print 'Usage: ' + sys.argv[0] + ' [OPTIONS]'
-        print '''
+    @staticmethod
+    def usage():
+        """Display the usage message"""
+        print('Usage: ' + sys.argv[0] + ' [OPTIONS]')
+        print('''
 Options:
   -h, --help            show this help message and exit
   -c, --curses          use curses (text mode) interface
   -d, --daemon          run in daemon (background) mode
   -t, --test            dryrun, make testing
 
 All parameters are optional.
-'''
+''')
 
-    def stop(self):
+    @staticmethod
+    def stop():
+        """Stop main application"""
         with shared.printLock:
             print('Stopping Bitmessage Deamon.')
         shutdown.doCleanShutdown()
 
-    # TODO: nice function but no one is using this
-    def getApiAddress(self):
+    # ..
todo:: nice function but no one is using this + @staticmethod + def getApiAddress(): + """This function returns API address and port""" if not BMConfigParser().safeGetBoolean( 'bitmessagesettings', 'apienabled'): return None @@ -497,6 +515,7 @@ All parameters are optional. def main(): + """Triggers main module""" mainprogram = Main() mainprogram.start() From 77b8b5aa4236dbd11264797850dc4a1961841f89 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:15:02 +0530 Subject: [PATCH 50/70] bmconfigparser quality fixes --- src/bmconfigparser.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/bmconfigparser.py b/src/bmconfigparser.py index 7f28d1b8..1851144d 100644 --- a/src/bmconfigparser.py +++ b/src/bmconfigparser.py @@ -58,7 +58,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser): raise ValueError("Invalid value %s" % value) return ConfigParser.ConfigParser.set(self, section, option, value) - def get(self, section, option, raw=False, variables=None): + def get(self, section, option, raw=False, variables=None): # pylint: disable=arguments-differ try: if section == "bitmessagesettings" and option == "timeformat": return ConfigParser.ConfigParser.get( @@ -86,6 +86,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser): self._temp[section] = {option: value} def safeGetBoolean(self, section, field): + """Return value as boolean, False on exceptions""" try: return self.getboolean(section, field) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, @@ -93,6 +94,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser): return False def safeGetInt(self, section, field, default=0): + """Return value as integer, default on exceptions, 0 if default missing""" try: return self.getint(section, field) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, @@ -100,18 +102,22 @@ class BMConfigParser(ConfigParser.SafeConfigParser): return default def safeGet(self, section, option, default=None): + """Return value as is, default on exceptions, None if default missing""" try: return self.get(section, option) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError, ValueError, AttributeError): return default - def items(self, section, raw=False, variables=None): + def items(self, section, raw=False, variables=None): # pylint: disable=arguments-differ + """Return section variables as parent, but override the "raw" argument to always True""" return ConfigParser.ConfigParser.items(self, section, True, variables) - def addresses(self): - return filter( - lambda x: x.startswith('BM-'), BMConfigParser().sections()) + @staticmethod + def addresses(): + """Return a list of local bitmessage addresses (from section labels)""" + return [ + x for x in BMConfigParser().sections() if x.startswith('BM-')] def read(self, filenames): ConfigParser.ConfigParser.read(self, filenames) @@ -132,6 +138,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser): continue def save(self): + """Save the runtime config onto the filesystem""" fileName = os.path.join(state.appdata, 'keys.dat') fileNameBak = '.'.join([ fileName, datetime.now().strftime("%Y%j%H%M%S%f"), 'bak']) @@ -153,12 +160,15 @@ class BMConfigParser(ConfigParser.SafeConfigParser): os.remove(fileNameBak) def validate(self, section, option, value): + """Input validator interface (using factory pattern)""" try: return getattr(self, 'validate_%s_%s' % (section, option))(value) except AttributeError: return True - def validate_bitmessagesettings_maxoutboundconnections(self, value): + 
@staticmethod + def validate_bitmessagesettings_maxoutboundconnections(value): + """Reject maxoutboundconnections that are too high or too low""" try: value = int(value) except ValueError: From e534994ee34e3d95908bafbb54289d9b1220259c Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:15:18 +0530 Subject: [PATCH 51/70] class_addressGenerator quality fixes --- src/class_addressGenerator.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/class_addressGenerator.py b/src/class_addressGenerator.py index c7c7e261..9e19cf50 100644 --- a/src/class_addressGenerator.py +++ b/src/class_addressGenerator.py @@ -1,4 +1,6 @@ - +""" +A thread for creating addresses +""" import time import hashlib from binascii import hexlify @@ -18,6 +20,7 @@ from network import StoppableThread class addressGenerator(StoppableThread): + """A thread for creating addresses""" name = "addressGenerator" @@ -33,6 +36,7 @@ class addressGenerator(StoppableThread): Process the requests for addresses generation from `.queues.addressGeneratorQueue` """ + # pylint: disable=too-many-locals, too-many-branches, protected-access, too-many-statements while state.shutdown == 0: queueValue = queues.addressGeneratorQueue.get() nonceTrialsPerByte = 0 @@ -212,7 +216,7 @@ class addressGenerator(StoppableThread): elif command == 'createDeterministicAddresses' \ or command == 'getDeterministicAddress' \ or command == 'createChan' or command == 'joinChan': - if len(deterministicPassphrase) == 0: + if not deterministicPassphrase: self.logger.warning( 'You are creating deterministic' ' address(es) using a blank passphrase.' @@ -361,7 +365,7 @@ class addressGenerator(StoppableThread): address) shared.myECCryptorObjects[ripe] = \ highlevelcrypto.makeCryptor( - hexlify(potentialPrivEncryptionKey)) + hexlify(potentialPrivEncryptionKey)) shared.myAddressesByHash[ripe] = address tag = hashlib.sha512(hashlib.sha512( encodeVarint(addressVersionNumber) + From 059e82e2a2b1d831e83a274ca2fa10f1dc9b51ff Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:15:36 +0530 Subject: [PATCH 52/70] class_objectProcessor quality fixes --- src/class_objectProcessor.py | 37 ++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/src/class_objectProcessor.py b/src/class_objectProcessor.py index b22876e8..2c741661 100644 --- a/src/class_objectProcessor.py +++ b/src/class_objectProcessor.py @@ -1,7 +1,9 @@ +""" +The objectProcessor thread, of which there is only one, processes the network objects +""" import hashlib import logging import random -import shared import threading import time from binascii import hexlify @@ -9,11 +11,13 @@ from subprocess import call # nosec import highlevelcrypto import knownnodes +import shared from addresses import ( calculateInventoryHash, decodeAddress, decodeVarint, encodeAddress, encodeVarint, varintDecodeError ) from bmconfigparser import BMConfigParser + import helper_bitcoin import helper_inbox import helper_msgcoding @@ -22,12 +26,15 @@ from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery from helper_ackPayload import genAckPayload from network import bmproto from network.node import Peer + import protocol import queues import state import tr from fallback import RIPEMD160Hash + import l10n +# pylint: disable=too-many-locals, too-many-return-statements, too-many-branches, too-many-statements logger = logging.getLogger('default') @@ -122,7 +129,10 @@ class objectProcessor(threading.Thread): state.shutdown = 2 break - def 
checkackdata(self, data):
+    @staticmethod
+    def checkackdata(data):
+        """Check whether the data is an acknowledgement of a message we sent"""
+        # pylint: disable=protected-access
         # Let's check whether this is a message acknowledgement bound for us.
         if len(data) < 32:
             return
@@ -272,6 +282,7 @@
         queues.workerQueue.put(('sendOutOrStoreMyV4Pubkey', myAddress))

     def processpubkey(self, data):
+        """Process a pubkey object"""
         pubkeyProcessingStartTime = time.time()
         shared.numberOfPubkeysProcessed += 1
         queues.UISignalQueue.put((
@@ -444,6 +455,7 @@
             timeRequiredToProcessPubkey)

     def processmsg(self, data):
+        """Process a message object"""
         messageProcessingStartTime = time.time()
         shared.numberOfMessagesProcessed += 1
         queues.UISignalQueue.put((
@@ -739,7 +751,7 @@
                 # We really should have a discussion about how to
                 # set the TTL for mailing list broadcasts. This is obviously
                 # hard-coded.
-                TTL = 2*7*24*60*60  # 2 weeks
+                TTL = 2 * 7 * 24 * 60 * 60  # 2 weeks
                 t = ('',
                      toAddress,
                      ripe,
@@ -791,6 +803,7 @@
         )

     def processbroadcast(self, data):
+        """Process a broadcast object"""
         messageProcessingStartTime = time.time()
         shared.numberOfBroadcastsProcessed += 1
         queues.UISignalQueue.put((
@@ -975,7 +988,7 @@
             fromAddress = encodeAddress(
                 sendersAddressVersion, sendersStream, calculatedRipe)
-            logger.info('fromAddress: %s' % fromAddress)
+            logger.info('fromAddress: %s', fromAddress)

             # Let's store the public key in case we want to reply to this person.
             sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
@@ -992,7 +1005,7 @@
             fromAddress = encodeAddress(
                 sendersAddressVersion, sendersStream, calculatedRipe)
-            logger.debug('fromAddress: ' + fromAddress)
+            logger.debug('fromAddress: %s', fromAddress)

         try:
             decodedMessage = helper_msgcoding.MsgDecode(
@@ -1060,7 +1073,8 @@
                 del state.neededPubkeys[tag]
                 self.sendMessages(address)

-    def sendMessages(self, address):
+    @staticmethod
+    def sendMessages(address):
         """
         This method is called by the `possibleNewPubkey` when it sees
         that we now have the necessary pubkey to send one or more messages.
         """
@@ -1073,7 +1087,9 @@
             " AND folder='sent'", address)
         queues.workerQueue.put(('sendmessage', ''))

-    def ackDataHasAValidHeader(self, ackData):
+    @staticmethod
+    def ackDataHasAValidHeader(ackData):
+        """Check whether ackData has a valid header; if not, it will not be sent"""
         if len(ackData) < protocol.Header.size:
             logger.info(
                 'The length of ackData is unreasonably short. 
Not sending' @@ -1108,11 +1124,12 @@ class objectProcessor(threading.Thread): return False return True - def addMailingListNameToSubject(self, subject, mailingListName): + @staticmethod + def addMailingListNameToSubject(subject, mailingListName): + """Adding mailingListName to subject""" subject = subject.strip() if subject[:3] == 'Re:' or subject[:3] == 'RE:': subject = subject[3:].strip() if '[' + mailingListName + ']' in subject: return subject - else: - return '[' + mailingListName + '] ' + subject + return '[' + mailingListName + '] ' + subject From 80b2bc1c9a4ae650b7d5ecf099338ac8d2df633c Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:17:36 +0530 Subject: [PATCH 53/70] class_singleCleaner.py quality fixes --- src/class_singleCleaner.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/src/class_singleCleaner.py b/src/class_singleCleaner.py index 9ffc1607..68a5e727 100644 --- a/src/class_singleCleaner.py +++ b/src/class_singleCleaner.py @@ -21,11 +21,11 @@ It resends messages when there has been no response: import gc import os -import shared import time import knownnodes import queues +import shared import state import tr from bmconfigparser import BMConfigParser @@ -35,11 +35,12 @@ from network import BMConnectionPool, StoppableThread class singleCleaner(StoppableThread): + """The singleCleaner thread class""" name = "singleCleaner" cycleLength = 300 expireDiscoveredPeers = 300 - def run(self): + def run(self): # pylint: disable=too-many-branches gc.disable() timeWeLastClearedInventoryAndPubkeysTables = 0 try: @@ -115,6 +116,7 @@ class singleCleaner(StoppableThread): # while writing it to disk knownnodes.cleanupKnownNodes() except Exception as err: + # pylint: disable=protected-access if "Errno 28" in str(err): self.logger.fatal( '(while writing knownnodes to disk)' @@ -127,17 +129,11 @@ class singleCleaner(StoppableThread): "MainWindow", 'Alert: Your disk or data storage volume' ' is full. Bitmessage will now exit.'), - True) + True) )) - # FIXME redundant? 
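
The "Errno 28" substring match above is how the cleaner recognizes a full
disk. A sketch of the equivalent, more explicit errno-based check; this is
hypothetical, not part of the patch:

    import errno

    try:
        knownnodes.cleanupKnownNodes()
    except EnvironmentError as err:
        # errno 28 is ENOSPC: no space left on device
        if err.errno == errno.ENOSPC:
            logger.fatal('Disk full: cannot write knownnodes, exiting.')
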
- if shared.daemon or not state.enableGUI: + if shared.thisapp.daemon or not state.enableGUI: os._exit(1) -# # clear download queues -# for thread in threading.enumerate(): -# if thread.isAlive() and hasattr(thread, 'downloadQueue'): -# thread.downloadQueue.clear() - # inv/object tracking for connection in BMConnectionPool().connections(): connection.clean() @@ -150,7 +146,7 @@ class singleCleaner(StoppableThread): del state.discoveredPeers[k] except KeyError: pass - # TODO: cleanup pending upload / download + # ..todo:: cleanup pending upload / download gc.collect() From 9923e972797738606235f3817062eed4dd8d8d98 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:21:22 +0530 Subject: [PATCH 54/70] class_singleWorker quality fixes --- src/class_singleWorker.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/class_singleWorker.py b/src/class_singleWorker.py index 60eabe2e..a275b79d 100644 --- a/src/class_singleWorker.py +++ b/src/class_singleWorker.py @@ -1,6 +1,5 @@ """ -src/class_singleWorker.py -========================= +Thread for performing PoW """ # pylint: disable=protected-access,too-many-branches,too-many-statements,no-self-use,too-many-lines,too-many-locals @@ -468,8 +467,8 @@ class singleWorker(StoppableThread): def sendOnionPeerObj(self, peer=None): """Send onionpeer object representing peer""" if not peer: # find own onionhostname - for peer in state.ownAddresses: - if peer.host.endswith('.onion'): + for peer_ in state.ownAddresses: + if peer_.host.endswith('.onion'): break else: return From 4a54c200d42119f36e9e2a4e72b7ef257c4945c9 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 4 Nov 2019 20:21:54 +0530 Subject: [PATCH 55/70] class_smtpServer quality fixes --- src/class_smtpServer.py | 47 +++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/src/class_smtpServer.py b/src/class_smtpServer.py index 924333a6..cdf867a9 100644 --- a/src/class_smtpServer.py +++ b/src/class_smtpServer.py @@ -1,3 +1,6 @@ +""" +SMTP server thread +""" import asyncore import base64 import email @@ -22,10 +25,13 @@ SMTPDOMAIN = "bmaddr.lan" LISTENPORT = 8425 logger = logging.getLogger('default') +# pylint: disable=attribute-defined-outside-init class smtpServerChannel(smtpd.SMTPChannel): + """Asyncore channel for SMTP protocol (server)""" def smtp_EHLO(self, arg): + """Process an EHLO""" if not arg: self.push('501 Syntax: HELO hostname') return @@ -33,14 +39,16 @@ class smtpServerChannel(smtpd.SMTPChannel): self.push('250 AUTH PLAIN') def smtp_AUTH(self, arg): + """Process AUTH""" if not arg or arg[0:5] not in ["PLAIN"]: self.push('501 Syntax: AUTH PLAIN') return authstring = arg[6:] try: decoded = base64.b64decode(authstring) - correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \ - "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "") + correctauth = "\x00" + BMConfigParser().safeGet( + "bitmessagesettings", "smtpdusername", "") + "\x00" + BMConfigParser().safeGet( + "bitmessagesettings", "smtpdpassword", "") logger.debug('authstring: %s / %s', correctauth, decoded) if correctauth == decoded: self.auth = True @@ -51,6 +59,7 @@ class smtpServerChannel(smtpd.SMTPChannel): self.push('501 Authentication fail') def smtp_DATA(self, arg): + """Process DATA""" if not hasattr(self, "auth") or not self.auth: self.push('530 Authentication required') return @@ -58,15 +67,18 @@ class smtpServerChannel(smtpd.SMTPChannel): class 
smtpServerPyBitmessage(smtpd.SMTPServer): + """Asyncore SMTP server class""" def handle_accept(self): + """Accept a connection""" pair = self.accept() if pair is not None: conn, addr = pair # print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr) self.channel = smtpServerChannel(self, conn, addr) - def send(self, fromAddress, toAddress, subject, message): - status, addressVersionNumber, streamNumber, ripe = decodeAddress(toAddress) + def send(self, fromAddress, toAddress, subject, message): # pylint: disable=arguments-differ + """Send a bitmessage""" + streamNumber, ripe = decodeAddress(toAddress)[2:] stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel') ackdata = genAckPayload(streamNumber, stealthLevel) sqlExecute( @@ -78,19 +90,21 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): subject, message, ackdata, - int(time.time()), # sentTime (this will never change) - int(time.time()), # lastActionTime - 0, # sleepTill time. This will get set when the POW gets done. + int(time.time()), # sentTime (this will never change) + int(time.time()), # lastActionTime + 0, # sleepTill time. This will get set when the POW gets done. 'msgqueued', - 0, # retryNumber - 'sent', # folder - 2, # encodingtype - min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2) # not necessary to have a TTL higher than 2 days + 0, # retryNumber + 'sent', # folder + 2, # encodingtype + # not necessary to have a TTL higher than 2 days + min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2) ) queues.workerQueue.put(('sendmessage', toAddress)) def decode_header(self, hdr): + """Email header decoding""" ret = [] for h in decode_header(self.msg_headers[hdr]): if h[1]: @@ -100,7 +114,9 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): return ret - def process_message(self, peer, mailfrom, rcpttos, data): + def process_message(self, peer, mailfrom, rcpttos, data): # pylint: disable=too-many-locals, too-many-branches + """Process an email""" + # print 'Receiving message from:', peer p = re.compile(".*<([^>]+)>") if not hasattr(self.channel, "auth") or not self.channel.auth: logger.error('Missing or invalid auth') @@ -158,7 +174,8 @@ class smtpServerPyBitmessage(smtpd.SMTPServer): class smtpServer(StoppableThread): - def __init__(self, parent=None): + """SMTP server thread""" + def __init__(self, _=None): super(smtpServer, self).__init__(name="smtpServerThread") self.server = smtpServerPyBitmessage(('127.0.0.1', LISTENPORT), None) @@ -171,7 +188,8 @@ class smtpServer(StoppableThread): asyncore.loop(1) -def signals(signal, frame): +def signals(_, __): + """Signal handler""" logger.warning('Got signal, terminating') for thread in threading.enumerate(): if thread.isAlive() and isinstance(thread, StoppableThread): @@ -179,6 +197,7 @@ def signals(signal, frame): def runServer(): + """Run SMTP server as a standalone python process""" logger.warning('Running SMTPd thread') smtpThread = smtpServer() smtpThread.start() From dbbf454c1501cad342282cf38a4ca83f552446ed Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Sat, 5 Oct 2019 13:01:48 +0530 Subject: [PATCH 56/70] class_sqlThread flake8 fixes --- src/class_sqlThread.py | 264 ++++++++++++++++++++++++++++++----------- 1 file changed, 196 insertions(+), 68 deletions(-) diff --git a/src/class_sqlThread.py b/src/class_sqlThread.py index bcb56303..7df75137 100644 --- a/src/class_sqlThread.py +++ b/src/class_sqlThread.py @@ -38,30 +38,38 @@ class sqlThread(threading.Thread): try: self.cur.execute( - '''CREATE TABLE inbox (msgid 
blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, encodingtype int, read bool, sighash blob, UNIQUE(msgid) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,''' + ''' received text, message text, folder text, encodingtype int, read bool, sighash blob,''' + ''' UNIQUE(msgid) ON CONFLICT REPLACE)''') self.cur.execute( - '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, senttime integer, lastactiontime integer, sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''' ) + '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,''' + ''' message text, ackdata blob, senttime integer, lastactiontime integer,''' + ''' sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''') self.cur.execute( - '''CREATE TABLE subscriptions (label text, address text, enabled bool)''' ) + '''CREATE TABLE subscriptions (label text, address text, enabled bool)''') self.cur.execute( - '''CREATE TABLE addressbook (label text, address text)''' ) + '''CREATE TABLE addressbook (label text, address text)''') self.cur.execute( - '''CREATE TABLE blacklist (label text, address text, enabled bool)''' ) + '''CREATE TABLE blacklist (label text, address text, enabled bool)''') self.cur.execute( - '''CREATE TABLE whitelist (label text, address text, enabled bool)''' ) + '''CREATE TABLE whitelist (label text, address text, enabled bool)''') self.cur.execute( - '''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') self.cur.execute( - '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,''' + ''' expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') self.cur.execute( - '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') + '''INSERT INTO subscriptions VALUES''' + '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') self.cur.execute( - '''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' ) - self.cur.execute( '''INSERT INTO settings VALUES('version','10')''') - self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( + '''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') + self.cur.execute('''INSERT INTO settings VALUES('version','10')''') + self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( int(time.time()),)) self.cur.execute( - '''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') self.conn.commit() logger.info('Created messages database file') except Exception as err: @@ -126,33 +134,38 @@ class sqlThread(threading.Thread): logger.debug( "In messages.dat database, creating new 'settings' table.") 
self.cur.execute( - '''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' ) - self.cur.execute( '''INSERT INTO settings VALUES('version','1')''') - self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( + '''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') + self.cur.execute('''INSERT INTO settings VALUES('version','1')''') + self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( int(time.time()),)) logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.') self.cur.execute( - '''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''') + '''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''') self.cur.execute( '''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''') - self.cur.execute( '''DROP TABLE pubkeys''') + self.cur.execute('''DROP TABLE pubkeys''') self.cur.execute( - '''CREATE TABLE pubkeys (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE pubkeys''' + ''' (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''') self.cur.execute( '''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''') - self.cur.execute( '''DROP TABLE pubkeys_backup;''') - logger.debug('Deleting all pubkeys from inventory. They will be redownloaded and then saved with the correct times.') + self.cur.execute('''DROP TABLE pubkeys_backup;''') + logger.debug( + 'Deleting all pubkeys from inventory.' + ' They will be redownloaded and then saved with the correct times.') self.cur.execute( '''delete from inventory where objecttype = 'pubkey';''') logger.debug('replacing Bitmessage announcements mailing list with a new one.') self.cur.execute( '''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''') self.cur.execute( - '''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') + '''INSERT INTO subscriptions VALUES''' + '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') logger.debug('Commiting.') self.conn.commit() logger.debug('Vacuuming message.dat. You might notice that the file size gets much smaller.') - self.cur.execute( ''' VACUUM ''') + self.cur.execute(''' VACUUM ''') # After code refactoring, the possible status values for sent messages # have changed. 
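
Each upgrade step in this thread follows the same versioned-schema pattern:
read the current version from the settings table, apply one migration, then
bump the stored version. A condensed sketch of one such step, illustrative
only and not part of the patch:

    self.cur.execute("SELECT value FROM settings WHERE key='version';")
    currentVersion = int(self.cur.fetchall()[0][0])
    if currentVersion == 1:
        # ... apply the schema changes for version 1 -> 2 here ...
        self.cur.execute(
            "UPDATE settings SET value=? WHERE key='version';", (2,))
        self.conn.commit()
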
@@ -176,15 +189,21 @@ class sqlThread(threading.Thread): 'In messages.dat database, removing an obsolete field from' ' the inventory table.') self.cur.execute( - '''CREATE TEMPORARY TABLE inventory_backup(hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''') + '''CREATE TEMPORARY TABLE inventory_backup''' + '''(hash blob, objecttype text, streamnumber int, payload blob,''' + ''' receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''') self.cur.execute( - '''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory;''') - self.cur.execute( '''DROP TABLE inventory''') + '''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime''' + ''' FROM inventory;''') + self.cur.execute('''DROP TABLE inventory''') self.cur.execute( - '''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE inventory''' + ''' (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,''' + ''' UNIQUE(hash) ON CONFLICT REPLACE)''') self.cur.execute( - '''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory_backup;''') - self.cur.execute( '''DROP TABLE inventory_backup;''') + '''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime''' + ''' FROM inventory_backup;''') + self.cur.execute('''DROP TABLE inventory_backup;''') item = '''update settings set value=? WHERE key='version';''' parameters = (3,) self.cur.execute(item, parameters) @@ -214,7 +233,8 @@ class sqlThread(threading.Thread): if currentVersion == 4: self.cur.execute('''DROP TABLE pubkeys''') self.cur.execute( - '''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''') + '''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,''' + '''usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''') self.cur.execute( '''delete from inventory where objecttype = 'pubkey';''') item = '''update settings set value=? WHERE key='version';''' @@ -230,7 +250,8 @@ class sqlThread(threading.Thread): if currentVersion == 5: self.cur.execute('''DROP TABLE knownnodes''') self.cur.execute( - '''CREATE TABLE objectprocessorqueue (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') item = '''update settings set value=? 
WHERE key='version';''' parameters = (6,) self.cur.execute(item, parameters) @@ -246,10 +267,15 @@ class sqlThread(threading.Thread): logger.debug( 'In messages.dat database, dropping and recreating' ' the inventory table.') - self.cur.execute( '''DROP TABLE inventory''') - self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' ) - self.cur.execute( '''DROP TABLE objectprocessorqueue''') - self.cur.execute( '''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' ) + self.cur.execute('''DROP TABLE inventory''') + self.cur.execute( + '''CREATE TABLE inventory''' + ''' (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,''' + ''' tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') + self.cur.execute('''DROP TABLE objectprocessorqueue''') + self.cur.execute( + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') item = '''update settings set value=? WHERE key='version';''' parameters = (7,) self.cur.execute(item, parameters) @@ -311,15 +337,24 @@ class sqlThread(threading.Thread): ' fields into the retrynumber field and adding the' ' sleeptill and ttl fields...') self.cur.execute( - '''CREATE TEMPORARY TABLE sent_backup (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, retrynumber integer, folder text, encodingtype int)''' ) + '''CREATE TEMPORARY TABLE sent_backup''' + ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' + ''' ackdata blob, lastactiontime integer, status text, retrynumber integer,''' + ''' folder text, encodingtype int)''') self.cur.execute( - '''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, 0, folder, encodingtype FROM sent;''') - self.cur.execute( '''DROP TABLE sent''') + '''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,''' + ''' subject, message, ackdata, lastactiontime,''' + ''' status, 0, folder, encodingtype FROM sent;''') + self.cur.execute('''DROP TABLE sent''') self.cur.execute( - '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text, retrynumber integer, folder text, encodingtype int, ttl int)''' ) + '''CREATE TABLE sent''' + ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' + ''' ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,''' + ''' retrynumber integer, folder text, encodingtype int, ttl int)''') self.cur.execute( - '''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''') - self.cur.execute( '''DROP TABLE sent_backup''') + '''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,''' + ''' lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''') + self.cur.execute('''DROP TABLE sent_backup''') logger.info('In messages.dat database, finished making TTL-related changes.') logger.debug('In messages.dat database, adding address field to the pubkeys table.') # 
We're going to have to calculate the address for each row in the pubkeys @@ -336,16 +371,24 @@ class sqlThread(threading.Thread): self.cur.execute(item, parameters) # Now we can remove the hash field from the pubkeys table. self.cur.execute( - '''CREATE TEMPORARY TABLE pubkeys_backup (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' ) + '''CREATE TEMPORARY TABLE pubkeys_backup''' + ''' (address text, addressversion int, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') self.cur.execute( - '''INSERT INTO pubkeys_backup SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''') - self.cur.execute( '''DROP TABLE pubkeys''') + '''INSERT INTO pubkeys_backup''' + ''' SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''') + self.cur.execute('''DROP TABLE pubkeys''') self.cur.execute( - '''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' ) + '''CREATE TABLE pubkeys''' + ''' (address text, addressversion int, transmitdata blob, time int, usedpersonally text,''' + ''' UNIQUE(address) ON CONFLICT REPLACE)''') self.cur.execute( - '''INSERT INTO pubkeys SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''') - self.cur.execute( '''DROP TABLE pubkeys_backup''') - logger.debug('In messages.dat database, done adding address field to the pubkeys table and removing the hash field.') + '''INSERT INTO pubkeys SELECT''' + ''' address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''') + self.cur.execute('''DROP TABLE pubkeys_backup''') + logger.debug( + 'In messages.dat database, done adding address field to the pubkeys table' + ' and removing the hash field.') self.cur.execute('''update settings set value=10 WHERE key='version';''') # Are you hoping to add a new option to the keys.dat file of existing @@ -355,7 +398,7 @@ class sqlThread(threading.Thread): try: testpayload = '\x00\x00' t = ('1234', 1, testpayload, '12345678', 'no') - self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t) + self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t) self.conn.commit() self.cur.execute( '''SELECT transmitdata FROM pubkeys WHERE address='1234' ''') @@ -365,13 +408,29 @@ class sqlThread(threading.Thread): self.cur.execute('''DELETE FROM pubkeys WHERE address='1234' ''') self.conn.commit() if transmitdata == '': - logger.fatal('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n') - logger.fatal('PyBitmessage will now exit very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n') + logger.fatal( + 'Problem: The version of SQLite you have cannot store Null values.' + ' Please download and install the latest revision of your version of Python' + ' (for example, the latest Python 2.7 revision) and try again.\n') + logger.fatal( + 'PyBitmessage will now exit very abruptly.' 
+ ' You may now see threading errors related to this abrupt exit' + ' but the problem you need to solve is related to SQLite.\n\n') os._exit(0) except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(While null value test) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(While null value test) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) else: logger.error(err) @@ -387,11 +446,21 @@ class sqlThread(threading.Thread): if int(value) < int(time.time()) - 86400: logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...') try: - self.cur.execute( ''' VACUUM ''') + self.cur.execute(''' VACUUM ''') except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(While VACUUM) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) item = '''update settings set value=? WHERE key='lastvacuumtime';''' parameters = (int(time.time()),) @@ -406,8 +475,18 @@ class sqlThread(threading.Thread): self.conn.commit() except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(While committing) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(While committing) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) elif item == 'exit': self.conn.close() @@ -421,8 +500,18 @@ class sqlThread(threading.Thread): self.conn.commit() except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(while movemessagstoprog) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(while movemessagstoprog) Alert: Your disk or data storage volume is full.' 
+ ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) self.conn.close() shutil.move( @@ -437,8 +526,18 @@ class sqlThread(threading.Thread): self.conn.commit() except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(while movemessagstoappdata) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(while movemessagstoappdata) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) self.conn.close() shutil.move( @@ -451,11 +550,21 @@ class sqlThread(threading.Thread): self.cur.execute('''delete from sent where folder='trash' ''') self.conn.commit() try: - self.cur.execute( ''' VACUUM ''') + self.cur.execute(''' VACUUM ''') except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(while deleteandvacuume) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) else: parameters = helper_sql.sqlSubmitQueue.get() @@ -467,11 +576,30 @@ class sqlThread(threading.Thread): rowcount = self.cur.rowcount except Exception as err: if str(err) == 'database or disk is full': - logger.fatal('(while cur.execute) Alert: Your disk or data storage volume is full. sqlThread will now exit.') - queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True))) + logger.fatal( + '(while cur.execute) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + tr._translate( + "MainWindow", + "Disk full"), + tr._translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) os._exit(0) else: - logger.fatal('Major error occurred when trying to execute a SQL statement within the sqlThread. Please tell Atheros about this error message or post it in the forum! Error occurred while trying to execute statement: "%s" Here are the parameters; you might want to censor this data with asterisks (***) as it can contain private information: %s. Here is the actual error message thrown by the sqlThread: %s', str(item), str(repr(parameters)), str(err)) + logger.fatal( + 'Major error occurred when trying to execute a SQL statement within the sqlThread.' 
+ ' Please tell Atheros about this error message or post it in the forum!' + ' Error occurred while trying to execute statement: "%s" Here are the parameters;' + ' you might want to censor this data with asterisks (***)' + ' as it can contain private information: %s.' + ' Here is the actual error message thrown by the sqlThread: %s', + str(item), + str(repr(parameters)), + str(err)) logger.fatal('This program shall now abruptly exit!') os._exit(0) From a9991a7a5a240aca228e0d655b2e756293a731b3 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Sat, 5 Oct 2019 15:22:28 +0530 Subject: [PATCH 57/70] class_sqlThread pylint fixes --- src/class_sqlThread.py | 9 ++++----- src/depends.py | 4 ++-- src/helper_inbox.py | 3 ++- src/helper_search.py | 3 +-- src/helper_sql.py | 1 + 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/class_sqlThread.py b/src/class_sqlThread.py index 7df75137..3d59803c 100644 --- a/src/class_sqlThread.py +++ b/src/class_sqlThread.py @@ -4,22 +4,21 @@ sqlThread is defined here import threading from bmconfigparser import BMConfigParser + import sqlite3 import time import shutil # used for moving the messages.dat file import sys import os from debug import logger + import helper_sql import helper_startup import paths import queues import state import tr - -# This thread exists because SQLITE3 is so un-threadsafe that we must -# submit queries to it and it puts results back in a different queue. They -# won't let us just use locks. +# pylint: disable=attribute-defined-outside-init,protected-access class sqlThread(threading.Thread): @@ -28,7 +27,7 @@ class sqlThread(threading.Thread): def __init__(self): threading.Thread.__init__(self, name="SQL") - def run(self): + def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements """Process SQL queries from `.helper_sql.sqlSubmitQueue`""" self.conn = sqlite3.connect(state.appdata + 'messages.dat') self.conn.text_factory = str diff --git a/src/depends.py b/src/depends.py index 0114ec94..7ae9220e 100755 --- a/src/depends.py +++ b/src/depends.py @@ -316,8 +316,8 @@ def check_curses(): """Do curses dependency check. Here we are checking for curses if available or not with check - as interface requires the pythondialog\ package and the dialog - utility. + as interface requires the pythondialog + package and the dialog utility. 
""" if sys.hexversion < 0x20600F0: logger.error( diff --git a/src/helper_inbox.py b/src/helper_inbox.py index 95214743..1b1710d6 100644 --- a/src/helper_inbox.py +++ b/src/helper_inbox.py @@ -1,10 +1,11 @@ -"""Helper Inbox performs inbox messagese related operations.""" +"""Helper Inbox performs inbox messages related operations.""" from helper_sql import sqlExecute, sqlQuery import queues def insert(t): + """Perform an insert into the "inbox" table""" sqlExecute('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?,?)''', *t) # shouldn't emit changedInboxUnread and displayNewInboxMessage # at the same time diff --git a/src/helper_search.py b/src/helper_search.py index d6704731..0908ada3 100644 --- a/src/helper_search.py +++ b/src/helper_search.py @@ -1,5 +1,4 @@ -#!/usr/bin/python2.7 - +"""Additional SQL helper for searching messages""" from helper_sql import * try: diff --git a/src/helper_sql.py b/src/helper_sql.py index 138a9f50..ad0a5dc4 100644 --- a/src/helper_sql.py +++ b/src/helper_sql.py @@ -80,6 +80,7 @@ def sqlExecuteChunked(sqlStatement, idCount, *args): def sqlExecute(sqlStatement, *args): + """Execute SQL statement (optionally with arguments)""" sqlLock.acquire() sqlSubmitQueue.put(sqlStatement) From 21faf52f2f19eeccc383a762e826cd71f66f0d74 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Sat, 5 Oct 2019 20:13:08 +0530 Subject: [PATCH 58/70] debug pylint fixes --- src/debug.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/debug.py b/src/debug.py index 472b0d02..cab07275 100644 --- a/src/debug.py +++ b/src/debug.py @@ -138,6 +138,7 @@ def configureLogging(): def resetLogging(): """Reconfigure logging in runtime when state.appdata dir changed""" + # pylint: disable=global-statement, used-before-assignment global logger for i in logger.handlers: logger.removeHandler(i) From e97d02ed783f713ebd8e3150cf155277932f2de1 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 7 Oct 2019 13:38:26 +0530 Subject: [PATCH 59/70] depends pylint fixes --- src/depends.py | 23 ++++++++++++----------- src/helper_ackPayload.py | 8 +++++--- src/helper_bitcoin.py | 1 + src/helper_inbox.py | 4 ++-- src/helper_random.py | 4 +++- src/helper_sql.py | 3 +-- 6 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/depends.py b/src/depends.py index 7ae9220e..68fba01a 100755 --- a/src/depends.py +++ b/src/depends.py @@ -113,6 +113,7 @@ PACKAGES = { def detectOS(): + """Finding out what Operating System is running""" if detectOS.result is not None: return detectOS.result if sys.platform.startswith('openbsd'): @@ -132,6 +133,7 @@ detectOS.result = None def detectOSRelease(): + """Detecting the release of OS""" with open("/etc/os-release", 'r') as osRelease: version = None for line in osRelease: @@ -148,6 +150,7 @@ def detectOSRelease(): def try_import(module, log_extra=False): + """Try to import the non imported packages""" try: return import_module(module) except ImportError: @@ -208,10 +211,8 @@ def check_sqlite(): ).fetchone()[0] logger.info('SQLite Library Source ID: %s', sqlite_source_id) if sqlite_version_number >= 3006023: - compile_options = ', '.join(map( - lambda row: row[0], - conn.execute('PRAGMA compile_options;') - )) + compile_options = ', '.join( + [row[0] for row in conn.execute('PRAGMA compile_options;')]) logger.info( 'SQLite Library Compile Options: %s', compile_options) # There is no specific version requirement as yet, so we just @@ -230,13 +231,13 @@ def check_sqlite(): conn.close() -def check_openssl(): +def check_openssl(): # pylint: disable=too-many-branches, 
too-many-return-statements """Do openssl dependency check. Here we are checking for openssl with its all dependent libraries and version checking. """ - + # pylint: disable=protected-access, redefined-outer-name ctypes = try_import('ctypes') if not ctypes: logger.error('Unable to check OpenSSL.') @@ -300,7 +301,7 @@ def check_openssl(): ' ECDH, and ECDSA enabled.') return False matches = cflags_regex.findall(openssl_cflags) - if len(matches) > 0: + if matches: logger.error( 'This OpenSSL library is missing the following required' ' features: %s. PyBitmessage requires OpenSSL 0.9.8b' @@ -311,13 +312,13 @@ def check_openssl(): return False -# TODO: The minimum versions of pythondialog and dialog need to be determined +# ..todo:: The minimum versions of pythondialog and dialog need to be determined def check_curses(): """Do curses dependency check. - Here we are checking for curses if available or not with check - as interface requires the pythondialog - package and the dialog utility. + Here we are checking for curses if available or not with check as interface + requires the `pythondialog `_ package + and the dialog utility. """ if sys.hexversion < 0x20600F0: logger.error( diff --git a/src/helper_ackPayload.py b/src/helper_ackPayload.py index acdbadf7..15ac0058 100644 --- a/src/helper_ackPayload.py +++ b/src/helper_ackPayload.py @@ -1,9 +1,11 @@ -"""This module is for generating ack payload.""" +""" +This module is for generating ack payload +""" +from binascii import hexlify +from struct import pack import highlevelcrypto import helper_random -from binascii import hexlify -from struct import pack from addresses import encodeVarint # This function generates payload objects for message acknowledgements diff --git a/src/helper_bitcoin.py b/src/helper_bitcoin.py index d56e395b..f8146285 100644 --- a/src/helper_bitcoin.py +++ b/src/helper_bitcoin.py @@ -3,6 +3,7 @@ from pyelliptic import arithmetic # This function expects that pubkey begin with \x04 def calculateBitcoinAddressFromPubkey(pubkey): + """This function expects that pubkey begin's with the bitcoin prefix""" if len(pubkey) != 65: print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.' return "error" diff --git a/src/helper_inbox.py b/src/helper_inbox.py index 1b1710d6..7cb860dc 100644 --- a/src/helper_inbox.py +++ b/src/helper_inbox.py @@ -1,11 +1,11 @@ -"""Helper Inbox performs inbox messages related operations.""" +"""Helper Inbox performs inbox messages related operations""" from helper_sql import sqlExecute, sqlQuery import queues def insert(t): - """Perform an insert into the "inbox" table""" + """Perform an insert into the "inbox" table""" sqlExecute('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?,?)''', *t) # shouldn't emit changedInboxUnread and displayNewInboxMessage # at the same time diff --git a/src/helper_random.py b/src/helper_random.py index 57f0ccb3..edc70a01 100644 --- a/src/helper_random.py +++ b/src/helper_random.py @@ -1,4 +1,6 @@ -"""Convenience functions for random operations. Not suitable for security / cryptography operations.""" +""" +Convenience functions for random operations. 
Not suitable for security / cryptography operations +""" import os import random diff --git a/src/helper_sql.py b/src/helper_sql.py index ad0a5dc4..16d36637 100644 --- a/src/helper_sql.py +++ b/src/helper_sql.py @@ -2,11 +2,9 @@ SQL-related functions defined here are really pass the queries (or other SQL commands) to :class:`.threads.sqlThread` through `sqlSubmitQueue` queue and check or return the result got from `sqlReturnQueue`. - This is done that way because :mod:`sqlite3` is so thread-unsafe that they won't even let you call it from different threads using your own locks. SQLite objects can only be used from one thread. - .. note:: This actually only applies for certain deployments, and/or really old version of sqlite. I haven't actually seen it anywhere. Current versions do have support for threading and multiprocessing. @@ -50,6 +48,7 @@ def sqlQuery(sqlStatement, *args): def sqlExecuteChunked(sqlStatement, idCount, *args): + """Execute chunked SQL statement to avoid argument limit""" # SQLITE_MAX_VARIABLE_NUMBER, # unfortunately getting/setting isn't exposed to python sqlExecuteChunked.chunkSize = 999 From 31e3d60fb09a866b5c19ab87eca9ad3508563833 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 7 Oct 2019 19:01:21 +0530 Subject: [PATCH 60/70] helper_ackPayload pylint fixes --- src/helper_ackPayload.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/helper_ackPayload.py b/src/helper_ackPayload.py index 15ac0058..dcce3153 100644 --- a/src/helper_ackPayload.py +++ b/src/helper_ackPayload.py @@ -1,6 +1,7 @@ """ This module is for generating ack payload """ + from binascii import hexlify from struct import pack @@ -8,17 +9,20 @@ import highlevelcrypto import helper_random from addresses import encodeVarint -# This function generates payload objects for message acknowledgements -# Several stealth levels are available depending on the privacy needs; -# a higher level means better stealth, but also higher cost (size+POW) -# - level 0: a random 32-byte sequence with a message header appended -# - level 1: a getpubkey request for a (random) dummy key hash -# - level 2: a standard message, encrypted to a random pubkey - def genAckPayload(streamNumber=1, stealthLevel=0): - """Generate and return payload obj.""" - if (stealthLevel == 2): # Generate privacy-enhanced payload + """ + Generate and return payload obj. 
+ + This function generates payload objects for message acknowledgements + Several stealth levels are available depending on the privacy needs; + a higher level means better stealth, but also higher cost (size+POW) + + - level 0: a random 32-byte sequence with a message header appended + - level 1: a getpubkey request for a (random) dummy key hash + - level 2: a standard message, encrypted to a random pubkey + """ + if stealthLevel == 2: # Generate privacy-enhanced payload # Generate a dummy privkey and derive the pubkey dummyPubKeyHex = highlevelcrypto.privToPub( hexlify(helper_random.randomBytes(32))) @@ -31,7 +35,7 @@ def genAckPayload(streamNumber=1, stealthLevel=0): acktype = 2 # message version = 1 - elif (stealthLevel == 1): # Basic privacy payload (random getpubkey) + elif stealthLevel == 1: # Basic privacy payload (random getpubkey) ackdata = helper_random.randomBytes(32) acktype = 0 # getpubkey version = 4 From 27c58b05f3caa549da8b25b94e35787eb0e4222f Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 7 Oct 2019 19:28:12 +0530 Subject: [PATCH 61/70] helper_bitcoin pylint fixes --- src/helper_bitcoin.py | 19 +++++++++++++++---- src/helper_sent.py | 4 ++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/helper_bitcoin.py b/src/helper_bitcoin.py index f8146285..d4f1d105 100644 --- a/src/helper_bitcoin.py +++ b/src/helper_bitcoin.py @@ -1,11 +1,19 @@ +""" +Calculates bitcoin and testnet address from pubkey +""" + import hashlib + +from debug import logger from pyelliptic import arithmetic -# This function expects that pubkey begin with \x04 + def calculateBitcoinAddressFromPubkey(pubkey): - """This function expects that pubkey begin's with the bitcoin prefix""" + """Calculate bitcoin address from given pubkey (65 bytes long hex string)""" if len(pubkey) != 65: - print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.' + logger.error('Could not calculate Bitcoin address from pubkey because' + ' function was passed a pubkey that was' + ' %i bytes long rather than 65.', len(pubkey)) return "error" ripe = hashlib.new('ripemd160') sha = hashlib.new('sha256') @@ -25,8 +33,11 @@ def calculateBitcoinAddressFromPubkey(pubkey): def calculateTestnetAddressFromPubkey(pubkey): + """This function expects that pubkey begin with the testnet prefix""" if len(pubkey) != 65: - print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.' 
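
The stealth levels spelled out in the genAckPayload docstring in patch 60
above trade acknowledgement size and proof-of-work cost for sender privacy. A
hypothetical usage sketch, not part of the patch:

    from helper_ackPayload import genAckPayload

    ack0 = genAckPayload(streamNumber=1, stealthLevel=0)  # random bytes + header
    ack1 = genAckPayload(streamNumber=1, stealthLevel=1)  # dummy getpubkey request
    ack2 = genAckPayload(streamNumber=1, stealthLevel=2)  # encrypted dummy message
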
+ logger.error('Could not calculate Bitcoin address from pubkey because' + ' function was passed a pubkey that was' + ' %i bytes long rather than 65.', len(pubkey)) return "error" ripe = hashlib.new('ripemd160') sha = hashlib.new('sha256') diff --git a/src/helper_sent.py b/src/helper_sent.py index 8dde7215..6b73c8c5 100644 --- a/src/helper_sent.py +++ b/src/helper_sent.py @@ -1,3 +1,7 @@ +""" +Insert operation into sent table +""" + from helper_sql import * def insert(t): From f4c7ac56047d60beae7c3ed7a01e26da40a02dd4 Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Mon, 7 Oct 2019 19:47:40 +0530 Subject: [PATCH 62/70] helper_inbox pylint fixes --- src/helper_inbox.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/helper_inbox.py b/src/helper_inbox.py index 7cb860dc..654dd59d 100644 --- a/src/helper_inbox.py +++ b/src/helper_inbox.py @@ -1,7 +1,7 @@ """Helper Inbox performs inbox messages related operations""" -from helper_sql import sqlExecute, sqlQuery import queues +from helper_sql import sqlExecute, sqlQuery def insert(t): @@ -13,11 +13,13 @@ def insert(t): def trash(msgid): + """Mark a message in the `inbox` as `trash`""" sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', msgid) queues.UISignalQueue.put(('removeInboxRowByMsgid', msgid)) def isMessageAlreadyInInbox(sigHash): + """Check for previous instances of this message""" queryReturn = sqlQuery( '''SELECT COUNT(*) FROM inbox WHERE sighash=?''', sigHash) return queryReturn[0][0] != 0 From 05cda087d6e59420aecc1e241fccc96b331a1eeb Mon Sep 17 00:00:00 2001 From: lakshyacis Date: Tue, 8 Oct 2019 12:33:48 +0530 Subject: [PATCH 63/70] helper_msgcoding pylint fixes --- src/helper_msgcoding.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/helper_msgcoding.py b/src/helper_msgcoding.py index cc632ffa..ae2bf80b 100644 --- a/src/helper_msgcoding.py +++ b/src/helper_msgcoding.py @@ -25,19 +25,24 @@ BITMESSAGE_ENCODING_EXTENDED = 3 class MsgEncodeException(Exception): + """Exception during message encoding""" pass class MsgDecodeException(Exception): + """Exception during message decoding""" pass class DecompressionSizeException(MsgDecodeException): + # pylint: disable=super-init-not-called + """Decompression resulted in too much data (attack protection)""" def __init__(self, size): self.size = size class MsgEncode(object): + """Message encoder class""" def __init__(self, message, encoding=BITMESSAGE_ENCODING_SIMPLE): self.data = None self.encoding = encoding @@ -52,6 +57,7 @@ class MsgEncode(object): raise MsgEncodeException("Unknown encoding %i" % (encoding)) def encodeExtended(self, message): + """Handle extended encoding""" try: msgObj = messagetypes.message.Message() self.data = zlib.compress(msgpack.dumps(msgObj.encode(message)), 9) @@ -64,15 +70,18 @@ class MsgEncode(object): self.length = len(self.data) def encodeSimple(self, message): + """Handle simple encoding""" self.data = 'Subject:%(subject)s\nBody:%(body)s' % message self.length = len(self.data) def encodeTrivial(self, message): + """Handle trivial encoding""" self.data = message['body'] self.length = len(self.data) class MsgDecode(object): + """Message decoder class""" def __init__(self, encoding, data): self.encoding = encoding if self.encoding == BITMESSAGE_ENCODING_EXTENDED: @@ -88,6 +97,7 @@ class MsgDecode(object): self.subject = _translate("MsgDecode", "Unknown encoding") def decodeExtended(self, data): + """Handle extended encoding""" dc = zlib.decompressobj() tmp = "" while len(tmp) <= 

From f4c7ac56047d60beae7c3ed7a01e26da40a02dd4 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Mon, 7 Oct 2019 19:47:40 +0530
Subject: [PATCH 62/70] helper_inbox pylint fixes

---
 src/helper_inbox.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/helper_inbox.py b/src/helper_inbox.py
index 7cb860dc..654dd59d 100644
--- a/src/helper_inbox.py
+++ b/src/helper_inbox.py
@@ -1,7 +1,7 @@
 """Helper Inbox performs inbox messages related operations"""
 
-from helper_sql import sqlExecute, sqlQuery
 import queues
+from helper_sql import sqlExecute, sqlQuery
 
 
 def insert(t):
@@ -13,11 +13,13 @@ def insert(t):
 
 
 def trash(msgid):
+    """Mark a message in the `inbox` as `trash`"""
     sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', msgid)
     queues.UISignalQueue.put(('removeInboxRowByMsgid', msgid))
 
 
 def isMessageAlreadyInInbox(sigHash):
+    """Check for previous instances of this message"""
     queryReturn = sqlQuery(
         '''SELECT COUNT(*) FROM inbox WHERE sighash=?''', sigHash)
     return queryReturn[0][0] != 0

From 05cda087d6e59420aecc1e241fccc96b331a1eeb Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Tue, 8 Oct 2019 12:33:48 +0530
Subject: [PATCH 63/70] helper_msgcoding pylint fixes

---
 src/helper_msgcoding.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/src/helper_msgcoding.py b/src/helper_msgcoding.py
index cc632ffa..ae2bf80b 100644
--- a/src/helper_msgcoding.py
+++ b/src/helper_msgcoding.py
@@ -25,19 +25,24 @@ BITMESSAGE_ENCODING_EXTENDED = 3
 
 
 class MsgEncodeException(Exception):
+    """Exception during message encoding"""
     pass
 
 
 class MsgDecodeException(Exception):
+    """Exception during message decoding"""
     pass
 
 
 class DecompressionSizeException(MsgDecodeException):
+    # pylint: disable=super-init-not-called
+    """Decompression resulted in too much data (attack protection)"""
     def __init__(self, size):
         self.size = size
 
 
 class MsgEncode(object):
+    """Message encoder class"""
     def __init__(self, message, encoding=BITMESSAGE_ENCODING_SIMPLE):
         self.data = None
         self.encoding = encoding
@@ -52,6 +57,7 @@ class MsgEncode(object):
             raise MsgEncodeException("Unknown encoding %i" % (encoding))
 
     def encodeExtended(self, message):
+        """Handle extended encoding"""
         try:
             msgObj = messagetypes.message.Message()
             self.data = zlib.compress(msgpack.dumps(msgObj.encode(message)), 9)
@@ -64,15 +70,18 @@ class MsgEncode(object):
         self.length = len(self.data)
 
     def encodeSimple(self, message):
+        """Handle simple encoding"""
        self.data = 'Subject:%(subject)s\nBody:%(body)s' % message
         self.length = len(self.data)
 
     def encodeTrivial(self, message):
+        """Handle trivial encoding"""
         self.data = message['body']
         self.length = len(self.data)
 
 
 class MsgDecode(object):
+    """Message decoder class"""
     def __init__(self, encoding, data):
         self.encoding = encoding
         if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
@@ -88,6 +97,7 @@ class MsgDecode(object):
             self.subject = _translate("MsgDecode", "Unknown encoding")
 
     def decodeExtended(self, data):
+        """Handle extended encoding"""
         dc = zlib.decompressobj()
         tmp = ""
         while len(tmp) <= BMConfigParser().safeGetInt("zlib", "maxsize"):
@@ -131,6 +141,7 @@ class MsgDecode(object):
         self.body = msgObj.body
 
     def decodeSimple(self, data):
+        """Handle simple encoding"""
         bodyPositionIndex = string.find(data, '\nBody:')
         if bodyPositionIndex > 1:
             subject = data[8:bodyPositionIndex]

From 28cfe78e6721f59f27f3df3a626ef61745143e00 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Tue, 8 Oct 2019 15:12:31 +0530
Subject: [PATCH 64/70] helper_random pylint fixes

---
 src/helper_random.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/helper_random.py b/src/helper_random.py
index edc70a01..0785c737 100644
--- a/src/helper_random.py
+++ b/src/helper_random.py
@@ -1,6 +1,4 @@
-"""
-Convenience functions for random operations. Not suitable for security / cryptography operations
-"""
+"""Convenience functions for random operations. Not suitable for security / cryptography operations."""
 
 import os
 import random
@@ -58,8 +56,7 @@ def randomrandrange(x, y=None):
     """
     if isinstance(y, NoneType):
         return random.randrange(x)  # nosec
-    else:
-        return random.randrange(x, y)  # nosec
+    return random.randrange(x, y)  # nosec
 
 
 def randomchoice(population):

From 9041b8f64431c4047992e0699dba1be80a7f762f Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Wed, 9 Oct 2019 16:30:12 +0530
Subject: [PATCH 65/70] helper_search flake8 fixes

---
 src/helper_search.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/helper_search.py b/src/helper_search.py
index 0908ada3..8dfc7eb6 100644
--- a/src/helper_search.py
+++ b/src/helper_search.py
@@ -1,5 +1,6 @@
 """Additional SQL helper for searching messages"""
-from helper_sql import *
+
+from helper_sql import sqlQuery
 
 try:
     from PyQt4 import QtGui
@@ -7,13 +8,15 @@ try:
 except ImportError:
     haveQt = False
 
-def search_translate (context, text):
+
+def search_translate(context, text):
     if haveQt:
         return QtGui.QApplication.translate(context, text)
     else:
         return text.lower()
 
-def search_sql(xAddress = "toaddress", account = None, folder = "inbox", where = None, what = None, unreadOnly = False):
+
+def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, what=None, unreadOnly=False):
     if what is not None and what != "":
         what = "%" + what + "%"
         if where == search_translate("MainWindow", "To"):
@@ -31,7 +34,7 @@ def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, w
     if folder == "sent":
         sqlStatementBase = '''
-            SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime 
+            SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime
             FROM sent '''
     else:
         sqlStatementBase = '''SELECT folder, msgid, toaddress, fromaddress, subject, received, read
             FROM inbox '''
@@ -67,7 +70,8 @@ def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, w
         sqlStatementBase += " ORDER BY lastactiontime"
     return sqlQuery(sqlStatementBase, sqlArguments)
 
-def check_match(toAddress, fromAddress, subject, message, where = None, what = None):
+
+def check_match(toAddress, fromAddress, subject, message, where=None, what=None):
     if what is not None and what != "":
         if where in (search_translate("MainWindow", "To"), search_translate("MainWindow", "All")):
             if what.lower() not in toAddress.lower():
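To make the query assembly concrete, here is a hypothetical invocation and, in the comment, roughly the statement it submits. The account value is invented, and the filter shape in the middle of the function is not shown in this diff, so the WHERE clause below is an assumption about its general form:

    # Hypothetical call: search one account's inbox for "invoice".
    rows = search_sql(xAddress="toaddress", account="BM-EXAMPLE",
                      folder="inbox", where="All", what="invoice")

    # Approximate statement handed to sqlQuery(), placeholders bound from
    # sqlArguments in order (assumed subject/message LIKE filter; note that
    # only the "sent" folder gets ORDER BY lastactiontime appended):
    #   SELECT folder, msgid, toaddress, fromaddress, subject, received, read
    #   FROM inbox WHERE toaddress = ? AND folder = ?
    #   AND (subject LIKE ? OR message LIKE ?)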

From d5f541a2abdb2d9411fd11eca1bf12c021445018 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Wed, 9 Oct 2019 17:33:51 +0530
Subject: [PATCH 66/70] helper_search pylint fixes

---
 src/helper_search.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/helper_search.py b/src/helper_search.py
index 8dfc7eb6..69acec43 100644
--- a/src/helper_search.py
+++ b/src/helper_search.py
@@ -10,13 +10,15 @@ except ImportError:
     haveQt = False
 
 
 def search_translate(context, text):
+    """Translation wrapper"""
     if haveQt:
         return QtGui.QApplication.translate(context, text)
-    else:
-        return text.lower()
+    return text.lower()
 
 
 def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, what=None, unreadOnly=False):
+    """Perform a search in mailbox tables"""
+    # pylint: disable=too-many-arguments, too-many-branches
     if what is not None and what != "":
         what = "%" + what + "%"
         if where == search_translate("MainWindow", "To"):
@@ -64,7 +66,7 @@ def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, w
             sqlArguments.append(what)
     if unreadOnly:
         sqlStatementParts.append("read = 0")
-    if len(sqlStatementParts) > 0:
+    if sqlStatementParts:
         sqlStatementBase += "WHERE " + " AND ".join(sqlStatementParts)
     if folder == "sent":
         sqlStatementBase += " ORDER BY lastactiontime"
@@ -72,6 +74,8 @@ def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, w
 
 
 def check_match(toAddress, fromAddress, subject, message, where=None, what=None):
+    """Check if a single message matches a filter (used when new messages are added to messagelists)"""
+    # pylint: disable=too-many-arguments
     if what is not None and what != "":
         if where in (search_translate("MainWindow", "To"), search_translate("MainWindow", "All")):
             if what.lower() not in toAddress.lower():

From d271996ac14c7b7097b4cbb395166aeb6477df11 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Wed, 9 Oct 2019 18:53:36 +0530
Subject: [PATCH 67/70] helper_sent flake8 fixes

---
 src/helper_sent.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/helper_sent.py b/src/helper_sent.py
index 6b73c8c5..75682fa9 100644
--- a/src/helper_sent.py
+++ b/src/helper_sent.py
@@ -2,7 +2,5 @@
 Insert operation into sent table
 """
 
-from helper_sql import *
-
 def insert(t):
     sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)

From ece3005f42c7f3595f46ed290a6a6da9191a7a21 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Wed, 9 Oct 2019 18:57:51 +0530
Subject: [PATCH 68/70] helper_sent pylint fixes

---
 src/helper_sent.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/helper_sent.py b/src/helper_sent.py
index 75682fa9..5a345fe7 100644
--- a/src/helper_sent.py
+++ b/src/helper_sent.py
@@ -1,6 +1,9 @@
 """
-Insert operation into sent table
+Insert values into sent table
 """
 
+from helper_sql import *
+
 
 def insert(t):
+    """Perform an insert into the `sent` table"""
     sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)

From e47b573b3eda9c93f56ecdb86b9c6c66fe4698f1 Mon Sep 17 00:00:00 2001
From: lakshyacis
Date: Thu, 10 Oct 2019 12:56:39 +0530
Subject: [PATCH 69/70] helper_sql pylint fixes

---
 src/helper_sql.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/helper_sql.py b/src/helper_sql.py
index 16d36637..e7f2a60e 100644
--- a/src/helper_sql.py
+++ b/src/helper_sql.py
@@ -2,9 +2,11 @@
 SQL-related functions defined here are really pass the queries (or other SQL
 commands) to :class:`.threads.sqlThread` through `sqlSubmitQueue` queue and
 check or return the result got from `sqlReturnQueue`.
+
 This is done that way because :mod:`sqlite3` is so thread-unsafe that they
 won't even let you call it from different threads using your own locks.
 SQLite objects can only be used from one thread.
+
 .. note:: This actually only applies for certain deployments, and/or
    really old version of sqlite. I haven't actually seen it anywhere.
    Current versions do have support for threading and multiprocessing.
@@ -92,13 +94,15 @@ def sqlExecute(sqlStatement, *args):
     sqlLock.release()
     return rowcount
 
+
 def sqlStoredProcedure(procName):
+    """Schedule procName to be run"""
     sqlLock.acquire()
     sqlSubmitQueue.put(procName)
     sqlLock.release()
 
 
-class SqlBulkExecute:
+class SqlBulkExecute(object):
     """This is used when you have to execute the same statement in a cycle."""
 
     def __enter__(self):

From a69732f060608428ddfebf608bdb3c8751e296ce Mon Sep 17 00:00:00 2001
From: Peter Surda
Date: Wed, 27 Nov 2019 06:47:04 +0100
Subject: [PATCH 70/70] Addrthread finish

- addrthread is supposed to spread addresses as they appear; this was
  never finished during the migration to asyncore
- stays conservative to prevent flooding and loops
- randomises the order of peers and addresses
- move protocol constants into a separate file
- move addr packet creation into a separate file
- see #1575
---
 src/network/addrthread.py     | 28 +++++++++++++----
 src/network/announcethread.py |  5 +--
 src/network/assemble.py       | 32 +++++++++++++++++++
 src/network/bmproto.py        | 58 ++++++++++------------------------
 src/network/constants.py      | 11 +++++++
 src/network/tcp.py            |  6 ++--
 6 files changed, 88 insertions(+), 52 deletions(-)
 create mode 100644 src/network/assemble.py
 create mode 100644 src/network/constants.py

diff --git a/src/network/addrthread.py b/src/network/addrthread.py
index d5d21599..8a0396f8 100644
--- a/src/network/addrthread.py
+++ b/src/network/addrthread.py
@@ -1,6 +1,11 @@
+"""
+Announce addresses as they are received from other hosts
+"""
 import Queue
 
 import state
+from helper_random import randomshuffle
+from network.assemble import assemble_addr
 from network.connectionpool import BMConnectionPool
 from queues import addrQueue
 from threads import StoppableThread
@@ -15,15 +20,26 @@ class AddrThread(StoppableThread):
             while True:
                 try:
                     data = addrQueue.get(False)
-                    chunk.append((data[0], data[1]))
-                    if len(data) > 2:
-                        source = BMConnectionPool().getConnectionByAddr(data[2])
+                    chunk.append(data)
                 except Queue.Empty:
                     break
-                except KeyError:
-                    continue
 
-            # finish
+            if chunk:
+                # Choose peers randomly
+                connections = BMConnectionPool().establishedConnections()
+                randomshuffle(connections)
+                for i in connections:
+                    randomshuffle(chunk)
+                    filtered = []
+                    for stream, peer, seen, destination in chunk:
+                        # peer's own address or address received from peer
+                        if i.destination in (peer, destination):
+                            continue
+                        if stream not in i.streams:
+                            continue
+                        filtered.append((stream, peer, seen))
+                    if filtered:
+                        i.append_write_buf(assemble_addr(filtered))
 
             addrQueue.iterate()
             for i in range(len(chunk)):
diff --git a/src/network/announcethread.py b/src/network/announcethread.py
index f635fc90..c11a2cc6 100644
--- a/src/network/announcethread.py
+++ b/src/network/announcethread.py
@@ -6,8 +6,9 @@ src/network/announcethread.py
 import time
 
 import state
+
 from bmconfigparser import BMConfigParser
-from network.bmproto import BMProto
+from network.assemble import assemble_addr
 from network.connectionpool import BMConnectionPool
 from network.udp import UDPSocket
 from node import Peer
@@ -41,4 +42,4 @@ class AnnounceThread(StoppableThread):
                 '127.0.0.1',
                 BMConfigParser().safeGetInt('bitmessagesettings', 'port')),
             time.time())
-        connection.append_write_buf(BMProto.assembleAddr([addr]))
+        connection.append_write_buf(assemble_addr([addr]))
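Both call sites above feed assemble_addr(), the packet builder introduced in the new module just below. Its input is a list of (stream, peer, timestamp) triples; a usage sketch with invented documentation-range addresses (the default port and the Peer import path are assumptions based on the hunks here):

    import time
    from network.assemble import assemble_addr
    from node import Peer

    triples = [
        (1, Peer('203.0.113.5', 8444), int(time.time())),   # stream, peer, seen
        (1, Peer('198.51.100.7', 8444), int(time.time())),
    ]
    # One or more 'addr' packets, ready for a connection's write buffer.
    packet = assemble_addr(triples)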
diff --git a/src/network/assemble.py b/src/network/assemble.py
new file mode 100644
index 00000000..2d31914c
--- /dev/null
+++ b/src/network/assemble.py
@@ -0,0 +1,32 @@
+"""
+Create bitmessage protocol command packets
+"""
+
+import struct
+
+import addresses
+from network.constants import MAX_ADDR_COUNT
+from network.node import Peer
+from protocol import CreatePacket, encodeHost
+
+
+def assemble_addr(peerList):
+    """Create address command"""
+    if isinstance(peerList, Peer):
+        peerList = [peerList]  # wrap a lone Peer in a sequence
+    if not peerList:
+        return b''
+    retval = b''
+    for i in range(0, len(peerList), MAX_ADDR_COUNT):
+        payload = addresses.encodeVarint(
+            len(peerList[i:i + MAX_ADDR_COUNT]))
+        for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:
+            payload += struct.pack(
+                '>Q', timestamp)  # 64-bit time
+            payload += struct.pack('>I', stream)
+            payload += struct.pack(
+                '>q', 1)  # service bit flags offered by this node
+            payload += encodeHost(peer.host)
+            payload += struct.pack('>H', peer.port)  # remote port
+        retval += CreatePacket('addr', payload)
+    return retval
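One subtlety in the single-Peer branch above: the wrapping must produce a real sequence, which is why a list literal is used. The old assembleAddr it replaces (removed in the bmproto.py hunk below) wrote `peerList = (peerList)`, which is a no-op, since parentheses alone only group and never create a tuple. A quick illustration:

    >>> p = object()
    >>> (p) is p                 # parentheses only group; no tuple here
    True
    >>> isinstance((p,), tuple)  # the trailing comma creates the tuple
    True
    >>> [p][0] is p              # a list literal is an equally clear fix
    True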
diff --git a/src/network/bmproto.py b/src/network/bmproto.py
index 11e96fd6..d620daa3 100644
--- a/src/network/bmproto.py
+++ b/src/network/bmproto.py
@@ -19,6 +19,12 @@ import state
 from bmconfigparser import BMConfigParser
 from inventory import Inventory
 from network.advanceddispatcher import AdvancedDispatcher
+from network.constants import (
+    ADDRESS_ALIVE,
+    MAX_MESSAGE_SIZE,
+    MAX_OBJECT_COUNT,
+    MAX_OBJECT_PAYLOAD_SIZE,
+    MAX_TIME_OFFSET)
 from network.dandelion import Dandelion
 from network.bmobject import (
     BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError,
@@ -51,18 +57,6 @@ class BMProtoExcessiveDataError(BMProtoError):
 
 class BMProto(AdvancedDispatcher, ObjectTracker):
     """A parser for the Bitmessage Protocol"""
     # pylint: disable=too-many-instance-attributes, too-many-public-methods
-    # ~1.6 MB which is the maximum possible size of an inv message.
-    maxMessageSize = 1600100
-    # 2**18 = 256kB is the maximum size of an object payload
-    maxObjectPayloadSize = 2**18
-    # protocol specification says max 1000 addresses in one addr command
-    maxAddrCount = 1000
-    # protocol specification says max 50000 objects in one inv command
-    maxObjectCount = 50000
-    # address is online if online less than this many seconds ago
-    addressAlive = 10800
-    # maximum time offset
-    maxTimeOffset = 3600
     timeOffsetWrongCount = 0
 
     def __init__(self, address=None, sock=None):  # pylint: disable=unused-argument, super-init-not-called
@@ -100,7 +94,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
             self.close_reason = "Bad magic"
             self.set_state("close")
             return False
-        if self.payloadLength > BMProto.maxMessageSize:
+        if self.payloadLength > MAX_MESSAGE_SIZE:
             self.invalid = True
         self.set_state(
             "bm_command",
@@ -343,7 +337,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
     def _command_inv(self, dandelion=False):
         items = self.decode_payload_content("l32s")
 
-        if len(items) > BMProto.maxObjectCount:
+        if len(items) > MAX_OBJECT_COUNT:
             logger.error(
                 'Too many items in %sinv message!', 'd' if dandelion else '')
             raise BMProtoExcessiveDataError()
@@ -378,7 +372,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
             nonce, expiresTime, objectType, version, streamNumber,
             self.payload, self.payloadOffset)
 
-        if len(self.payload) - self.payloadOffset > BMProto.maxObjectPayloadSize:
+        if len(self.payload) - self.payloadOffset > MAX_OBJECT_PAYLOAD_SIZE:
             logger.info(
                 'The payload length of this object is too large (%d bytes).'
                 ' Ignoring it.', len(self.payload) - self.payloadOffset)
@@ -442,7 +436,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
                 continue
             if (
                 decodedIP and time.time() - seenTime > 0 and
-                seenTime > time.time() - BMProto.addressAlive and
+                seenTime > time.time() - ADDRESS_ALIVE and
                 port > 0
             ):
                 peer = Peer(decodedIP, port)
@@ -461,7 +455,10 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
                         "rating": 0,
                         "self": False,
                     }
-            addrQueue.put((stream, peer, self.destination))
+                # since we don't track peers outside of knownnodes,
+                # only spread if in knownnodes to prevent flood
+                addrQueue.put((stream, peer, seenTime,
+                               self.destination))
         return True
 
     def bm_command_portcheck(self):
@@ -552,7 +549,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
                 'Closing connection to old protocol version %s, node: %s',
                 self.remoteProtocolVersion, self.destination)
             return False
-        if self.timeOffset > BMProto.maxTimeOffset:
+        if self.timeOffset > MAX_TIME_OFFSET:
             self.append_write_buf(protocol.assembleErrorMessage(
                 errorText="Your time is too far in the future compared to mine."
                 " Closing connection.", fatal=2))
@@ -561,7 +558,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
                 " Closing connection to it.", self.destination, self.timeOffset)
             BMProto.timeOffsetWrongCount += 1
             return False
-        elif self.timeOffset < -BMProto.maxTimeOffset:
+        elif self.timeOffset < -MAX_TIME_OFFSET:
             self.append_write_buf(protocol.assembleErrorMessage(
                 errorText="Your time is too far in the past compared to mine."
                 " Closing connection.", fatal=2))
@@ -623,29 +620,6 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
 
         return True
 
-    @staticmethod
-    def assembleAddr(peerList):
-        """Build up a packed address"""
-        if isinstance(peerList, Peer):
-            peerList = (peerList)
-        if not peerList:
-            return b''
-        retval = b''
-        for i in range(0, len(peerList), BMProto.maxAddrCount):
-            payload = addresses.encodeVarint(
-                len(peerList[i:i + BMProto.maxAddrCount]))
-            for address in peerList[i:i + BMProto.maxAddrCount]:
-                stream, peer, timestamp = address
-                payload += struct.pack(
-                    '>Q', timestamp)  # 64-bit time
-                payload += struct.pack('>I', stream)
-                payload += struct.pack(
-                    '>q', 1)  # service bit flags offered by this node
-                payload += protocol.encodeHost(peer.host)
-                payload += struct.pack('>H', peer.port)  # remote port
-            retval += protocol.CreatePacket('addr', payload)
-        return retval
-
     @staticmethod
     def stopDownloadingObject(hashId, forwardAnyway=False):
         """Stop downloading an object"""
diff --git a/src/network/constants.py b/src/network/constants.py
new file mode 100644
index 00000000..a3414ef3
--- /dev/null
+++ b/src/network/constants.py
@@ -0,0 +1,11 @@
+"""
+Network protocol constants
+"""
+
+
+ADDRESS_ALIVE = 10800  #: address is online if online less than this many seconds ago
+MAX_ADDR_COUNT = 1000  #: protocol specification says max 1000 addresses in one addr command
+MAX_MESSAGE_SIZE = 1600100  #: ~1.6 MB which is the maximum possible size of an inv message.
+MAX_OBJECT_PAYLOAD_SIZE = 2**18  #: 2**18 = 256kB is the maximum size of an object payload
+MAX_OBJECT_COUNT = 50000  #: protocol specification says max 50000 objects in one inv command
+MAX_TIME_OFFSET = 3600  #: maximum time offset
diff --git a/src/network/tcp.py b/src/network/tcp.py
index 31d20dea..3097765f 100644
--- a/src/network/tcp.py
+++ b/src/network/tcp.py
@@ -22,7 +22,9 @@ from bmconfigparser import BMConfigParser
 from helper_random import randomBytes
 from inventory import Inventory
 from network.advanceddispatcher import AdvancedDispatcher
+from network.assemble import assemble_addr
 from network.bmproto import BMProto
+from network.constants import MAX_OBJECT_COUNT
 from network.dandelion import Dandelion
 from network.objectracker import ObjectTracker
 from network.socks4a import Socks4aConnection
@@ -183,7 +185,7 @@ class TCPConnection(BMProto, TLSDispatcher):
                 for peer, params in addrs[substream]:
                     templist.append((substream, peer, params["lastseen"]))
         if templist:
-            self.append_write_buf(BMProto.assembleAddr(templist))
+            self.append_write_buf(assemble_addr(templist))
 
     def sendBigInv(self):
         """
@@ -222,7 +224,7 @@ class TCPConnection(BMProto, TLSDispatcher):
             # Remove -1 below when sufficient time has passed for users to
             # upgrade to versions of PyBitmessage that accept inv with 50,000
             # items
-            if objectCount >= BMProto.maxObjectCount - 1:
+            if objectCount >= MAX_OBJECT_COUNT - 1:
                 sendChunk()
                 payload = b''
                 objectCount = 0
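The inv bound and the message-size bound in constants.py are consistent with each other: a maximal inv carries 50,000 32-byte hashes plus a 3-byte varint count, and MAX_MESSAGE_SIZE leaves a little headroom for the 24-byte command header. A back-of-the-envelope check (the header breakdown is the standard Bitmessage framing; figures here are arithmetic only):

    MAX_OBJECT_COUNT = 50000
    HASH_LEN = 32         # each inventory vector is a 32-byte hash
    VARINT_COUNT_LEN = 3  # varint encoding of 50000 takes 3 bytes
    HEADER_LEN = 24       # 4 magic + 12 command + 4 length + 4 checksum

    payload = VARINT_COUNT_LEN + MAX_OBJECT_COUNT * HASH_LEN  # 1,600,003
    total = HEADER_LEN + payload                              # 1,600,027
    assert total <= 1600100  # MAX_MESSAGE_SIZE, ~73 bytes to spare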