fixed conflicts after merge
commit c98d1a70f4
.gitignore (vendored) | 1
@@ -17,6 +17,7 @@ dist
*.egg-info
docs/_*/*
docs/autodoc/
build/sphinx/
pyan/
.buildozer/
bin/
.readthedocs.yml (new file) | 9
@@ -0,0 +1,9 @@
version: 2

python:
  version: 2.7
  install:
    - requirements: docs/requirements.txt
    - method: setuptools
      path: .
  system_packages: true
@@ -14,12 +14,10 @@ Development
----------
Bitmessage is a collaborative project. You are welcome to submit pull requests
although if you plan to put a non-trivial amount of work into coding new
features, it is recommended that you first solicit feedback on the DevTalk
pseudo-mailing list:
BM-2D9QKN4teYRvoq2fyzpiftPh9WP9qggtzh
features, it is recommended that you first describe your ideas in the
separate issue.

Feel welcome to join chan "bitmessage", BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY
which is on preview here: https://beamstat.com/chan/bitmessage
Feel welcome to join chan "bitmessage", BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY

References
----------
docs/_static/custom.css (vendored, new file) | 4
@@ -0,0 +1,4 @@
/* Hide "On GitHub" section from versions menu */
li.wy-breadcrumbs-aside > a.fa {
    display: none;
}
docs/conf.py | 119
@@ -2,35 +2,24 @@
"""
Configuration file for the Sphinx documentation builder.

This file does only contain a selection of the most common options. For a
full list see the documentation:
For a full list of options see the documentation:
http://www.sphinx-doc.org/en/master/config

-- Path setup --------------------------------------------------------------

If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here. If the directory is relative to the
documentation root, use os.path.abspath to make it absolute, like shown here.
"""

import os
import sys

from sphinx.apidoc import main
from mock import Mock as MagicMock

sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../src'))
sys.path.insert(0, os.path.abspath('../src/pyelliptic'))

import version
from importlib import import_module

import version  # noqa:E402


# -- Project information -----------------------------------------------------

project = u'PyBitmessage'
copyright = u'2018, The Bitmessage Team'  # pylint: disable=redefined-builtin
copyright = u'2019, The Bitmessage Team'  # pylint: disable=redefined-builtin
author = u'The Bitmessage Team'

# The short X.Y version

@@ -50,15 +39,18 @@ release = version
# ones.
extensions = [
    'sphinx.ext.autodoc',
    # 'sphinx.ext.doctest',  # Currently disabled due to bad doctests
    'sphinx.ext.coverage',  # FIXME: unused
    'sphinx.ext.imgmath',  # legacy unused
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'sphinxcontrib.apidoc',
    'm2r',
]

default_role = 'obj'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']


@@ -75,23 +67,29 @@ master_doc = 'index'
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Don't prepend every class or function name with full module path
add_module_names = False

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['pybitmessage.']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the

@@ -104,6 +102,10 @@ html_theme = 'alabaster'
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_css_files = [
    'custom.css',
]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#

@@ -114,10 +116,7 @@ html_static_path = ['_static']
#
# html_sidebars = {}

# Deal with long lines in source view
html_theme_options = {
    'page_width': '1366px',
}
html_show_sourcelink = False

# -- Options for HTMLHelp output ---------------------------------------------


@@ -199,10 +198,74 @@ epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

autodoc_mock_imports = [
    'debug',
    'pybitmessage.bitmessagekivy',
    'pybitmessage.bitmessageqt.addressvalidator',
    'pybitmessage.helper_startup',
    'pybitmessage.network.httpd',
    'pybitmessage.network.https',
    'ctypes',
    'dialog',
    'gi',
    'kivy',
    'logging',
    'msgpack',
    'numpy',
    'pkg_resources',
    'pycanberra',
    'pyopencl',
    'PyQt4',
    'pyxdg',
    'qrcode',
    'stem',
]
autodoc_member_order = 'bysource'

# Apidoc settings
apidoc_module_dir = '../pybitmessage'
apidoc_output_dir = 'autodoc'
apidoc_excluded_paths = [
    'bitmessagekivy', 'build_osx.py',
    'bitmessageqt/addressvalidator.py', 'bitmessageqt/migrationwizard.py',
    'bitmessageqt/newaddresswizard.py', 'helper_startup.py',
    'kivymd', 'main.py', 'navigationdrawer', 'network/http*',
    'pybitmessage', 'tests', 'version.py'
]
apidoc_module_first = True
apidoc_separate_modules = True
apidoc_toc_file = False
apidoc_extra_args = ['-a']

# Napoleon settings
napoleon_google_docstring = True


# linkcode function
def linkcode_resolve(domain, info):
    """This generates source URL's for sphinx.ext.linkcode"""
    if domain != 'py' or not info['module']:
        return
    try:
        home = os.path.abspath(import_module('pybitmessage').__path__[0])
        mod = import_module(info['module']).__file__
    except ImportError:
        return
    repo = 'https://github.com/Bitmessage/PyBitmessage/blob/v0.6/src%s'
    path = mod.replace(home, '')
    if path != mod:
        # put the link only for top level definitions
        if len(info['fullname'].split('.')) > 1:
            return
        if path.endswith('.pyc'):
            path = path[:-1]
        return repo % path


# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
intersphinx_mapping = {'https://docs.python.org/2.7/': None}

# -- Options for todo extension ----------------------------------------------
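The long autodoc_mock_imports list in the hunk above keeps Sphinx from failing on optional or platform-specific dependencies while it imports the code for autodoc. A minimal sketch of the same idea follows; the module names are taken from the list, and the manual stubbing is an assumption about how to reproduce the effect outside Sphinx, not code from the repository:

# conf.py fragment (sketch): Sphinx substitutes a mock object for each
# listed module while importing documented code, so heavy optional
# dependencies never need to be installed on the docs builder.
autodoc_mock_imports = ['PyQt4', 'pyopencl']

# Roughly equivalent by hand, e.g. in a quick local experiment:
import sys
from mock import Mock  # the py2 backport already imported in conf.py above

sys.modules['PyQt4'] = Mock()  # "import PyQt4" now yields a harmless stub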
@@ -1,2 +1,2 @@
.. mdinclude:: fabfile/README.md
.. mdinclude:: ../../../fabfile/README.md
@@ -62,7 +62,7 @@ To re-build them, run `fab build_docs:dep_graphs=true`. Note that the dot graph
.. figure:: ../../../../_static/deps-sfdp.png
   :alt: SFDP graph of dependencies
   :width: 100 pc

   :index:`SFDP` graph of dependencies

.. figure:: ../../../../_static/deps-dot.png
@@ -1,8 +1,8 @@
Processes
=========

In other to keep the Bitmessage project running the team run a number of systems and accounts that form the
development pipeline and continuous delivery process. We are always striving to improve the process. Towards
In order to keep the Bitmessage project running, the team runs a number of systems and accounts that form the
development pipeline and continuous delivery process. We are always striving to improve this process. Towards
that end it is documented here.

@@ -20,7 +20,7 @@ Our official Github_ account is Bitmessage. Our issue tracker is here as well.
BitMessage
----------

We eat our own dog food! You can send us bug reports via the Bitmessage chan at xxx
We eat our own dog food! You can send us bug reports via the [chan] bitmessage BM-2cWy7cvHoq3f1rYMerRJp8PT653jjSuEdY

.. _website: https://bitmessage.org
@@ -1,12 +1,20 @@
.. mdinclude:: ../README.md

Documentation
-------------
.. toctree::
   :maxdepth: 3

   autodoc/pybitmessage

Legacy pages
------------
.. toctree::
   :maxdepth: 2

   overview
   usage
   contribute


Indices and tables
------------------
docs/requirements.txt (new file) | 2

@@ -0,0 +1,2 @@
m2r
sphinxcontrib-apidoc

pybitmessage (symbolic link, new file) | 1

@@ -0,0 +1 @@
src
@@ -1,14 +1,17 @@
# Since there is overlap in the violations that the different tools check for, it makes sense to quiesce some warnings
# in some tools if those warnings in other tools are preferred. This avoids the need to add duplicate lint warnings.

# max-line-length should be removed ASAP!

[pycodestyle]
max-line-length = 119

[flake8]
max-line-length = 119
ignore = E722,F841
ignore = E722,F841,W503
# E722: pylint is preferred for bare-except
# F841: pylint is preferred for unused-variable
# W503: deprecated: https://bugs.python.org/issue26763 - https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator

# pylint honours the [MESSAGES CONTROL] section
# as well as [MASTER] section
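For context, here is the line-break style that the newly ignored W503 warning targets; a small illustrative snippet, not taken from the repository:

# flake8 reports W503 ("line break before binary operator") on the second
# line of the expression below; with W503 in the ignore list, this layout,
# which the current PEP 8 text recommends, passes cleanly.
first_value, second_value = 1, 2
total = (first_value
         + second_value)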
setup.py | 14
@@ -17,13 +17,7 @@ EXTRAS_REQUIRE = {
    'qrcode': ['qrcode'],
    'sound;platform_system=="Windows"': ['winsound'],
    'tor': ['stem'],
    'docs': [
        'sphinx',  # fab build_docs
        'graphviz',  # fab build_docs
        'curses',  # src/depends.py
        'python2-pythondialog',  # src/depends.py
        'm2r',  # fab build_docs
    ]
    'docs': ['sphinx', 'sphinxcontrib-apidoc', 'm2r']
}

@@ -155,5 +149,9 @@ if __name__ == "__main__":
        # ]
        },
        scripts=['src/pybitmessage'],
        cmdclass={'install': InstallCmd}
        cmdclass={'install': InstallCmd},
        command_options={
            'build_sphinx': {
                'source_dir': ('setup.py', 'docs')}
        }
    )
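A brief aside on the setuptools mechanics used above; this is a generic sketch with a made-up project name, not the project's actual setup.py:

# command_options pre-seeds options of a distutils/setuptools command; the
# pair below points Sphinx's build_sphinx command at the docs/ source
# directory, so "python setup.py build_sphinx" needs no extra flags, while
# the 'docs' extra lets "pip install .[docs]" pull in the doc toolchain.
from setuptools import setup

setup(
    name='example-project',  # hypothetical
    extras_require={'docs': ['sphinx', 'sphinxcontrib-apidoc', 'm2r']},
    command_options={
        'build_sphinx': {'source_dir': ('setup.py', 'docs')},
    },
)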
@@ -1,7 +1,5 @@
"""
src/addresses.py
================

Operations with addresses
"""
# pylint: disable=redefined-outer-name,inconsistent-return-statements

@@ -18,8 +16,9 @@ ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
def encodeBase58(num, alphabet=ALPHABET):
    """Encode a number in Base X

    `num`: The number to encode
    `alphabet`: The alphabet to use for encoding
    Args:
      num: The number to encode
      alphabet: The alphabet to use for encoding
    """
    if num == 0:
        return alphabet[0]

@@ -27,7 +26,6 @@ def encodeBase58(num, alphabet=ALPHABET):
    base = len(alphabet)
    while num:
        rem = num % base
        # print 'num is:', num
        num = num // base
        arr.append(alphabet[rem])
    arr.reverse()

@@ -37,9 +35,9 @@ def encodeBase58(num, alphabet=ALPHABET):
def decodeBase58(string, alphabet=ALPHABET):
    """Decode a Base X encoded string into the number

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding
    Args:
      string: The encoded string
      alphabet: The alphabet to use for encoding
    """
    base = len(alphabet)
    num = 0
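As a quick illustration of the two functions whose docstrings change above, a hedged round-trip example; it assumes src/addresses.py is importable as addresses:

from addresses import decodeBase58, encodeBase58

n = 123456789
encoded = encodeBase58(n)           # base58 string over the ALPHABET above
assert decodeBase58(encoded) == n   # decoding reverses encoding
assert encodeBase58(0) == '1'       # zero maps to the first alphabet symbol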
@@ -54,11 +52,20 @@ def decodeBase58(string, alphabet=ALPHABET):
    return num


class varintEncodeError(Exception):
    """Exception class for encoding varint"""
    pass


class varintDecodeError(Exception):
    """Exception class for decoding varint data"""
    pass


def encodeVarint(integer):
    """Convert integer into varint bytes"""
    if integer < 0:
        logger.error('varint cannot be < 0')
        raise SystemExit
        raise varintEncodeError('varint cannot be < 0')
    if integer < 253:
        return pack('>B', integer)
    if integer >= 253 and integer < 65536:

@@ -68,13 +75,7 @@ def encodeVarint(integer):
    if integer >= 4294967296 and integer < 18446744073709551616:
        return pack('>B', 255) + pack('>Q', integer)
    if integer >= 18446744073709551616:
        logger.error('varint cannot be >= 18446744073709551616')
        raise SystemExit


class varintDecodeError(Exception):
    """Exception class for decoding varint data"""
    pass
        raise varintEncodeError('varint cannot be >= 18446744073709551616')


def decodeVarint(data):
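For reference, a standalone sketch of the size classes that encodeVarint handles; the intermediate branches are not visible in the hunks above and are assumed here from the Bitmessage wire format (big-endian, marker bytes 0xfd/0xfe/0xff):

from struct import pack


def encode_varint_sketch(n):
    """Illustrative re-statement of the encoding rules, raising ValueError
    where the patched code raises varintEncodeError."""
    if n < 0 or n >= 18446744073709551616:
        raise ValueError('varint out of range')
    if n < 253:
        return pack('>B', n)                       # 1 byte
    if n < 65536:
        return pack('>B', 253) + pack('>H', n)     # 3 bytes
    if n < 4294967296:
        return pack('>B', 254) + pack('>I', n)     # 5 bytes
    return pack('>B', 255) + pack('>Q', n)         # 9 bytes


assert len(encode_varint_sketch(252)) == 1
assert len(encode_varint_sketch(253)) == 3
assert len(encode_varint_sketch(70000)) == 5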
src/api.py | 13
@ -1,19 +1,15 @@
|
|||
# pylint: disable=too-many-locals,too-many-lines,no-self-use,too-many-public-methods,too-many-branches
|
||||
# pylint: disable=too-many-statements
|
||||
"""
|
||||
src/api.py
|
||||
==========
|
||||
|
||||
# Copyright (c) 2012-2016 Jonathan Warren
|
||||
# Copyright (c) 2012-2019 The Bitmessage developers
|
||||
|
||||
"""
|
||||
This is not what you run to run the Bitmessage API. Instead, enable the API
|
||||
( https://bitmessage.org/wiki/API ) and optionally enable daemon mode
|
||||
( https://bitmessage.org/wiki/Daemon ) then run bitmessagemain.py.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import base64
|
||||
import errno
|
||||
import hashlib
|
||||
|
@ -42,8 +38,8 @@ from bmconfigparser import BMConfigParser
|
|||
from debug import logger
|
||||
from helper_ackPayload import genAckPayload
|
||||
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery, sqlStoredProcedure
|
||||
from helper_threading import StoppableThread
|
||||
from inventory import Inventory
|
||||
from network.threads import StoppableThread
|
||||
|
||||
str_chan = '[chan]'
|
||||
|
||||
|
@ -99,6 +95,8 @@ class singleAPI(StoppableThread):
|
|||
for attempt in range(50):
|
||||
try:
|
||||
if attempt > 0:
|
||||
logger.warning(
|
||||
'Failed to start API listener on port %s', port)
|
||||
port = random.randint(32767, 65535)
|
||||
se = StoppableXMLRPCServer(
|
||||
(BMConfigParser().get(
|
||||
|
@ -110,8 +108,9 @@ class singleAPI(StoppableThread):
|
|||
continue
|
||||
else:
|
||||
if attempt > 0:
|
||||
logger.warning('Setting apiport to %s', port)
|
||||
BMConfigParser().set(
|
||||
"bitmessagesettings", "apiport", str(port))
|
||||
'bitmessagesettings', 'apiport', str(port))
|
||||
BMConfigParser().save()
|
||||
break
|
||||
se.register_introspection_functions()
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
#!/usr/bin/python2.7
|
||||
"""
|
||||
src/bitmessagemain.py
|
||||
=================================
|
||||
The PyBitmessage startup script
|
||||
"""
|
||||
# !/usr/bin/python2.7
|
||||
# Copyright (c) 2012-2016 Jonathan Warren
|
||||
# Copyright (c) 2012-2019 The Bitmessage developers
|
||||
# Distributed under the MIT/X11 software license. See the accompanying
|
||||
|
@ -11,8 +10,6 @@ src/bitmessagemain.py
|
|||
# Right now, PyBitmessage only support connecting to stream 1. It doesn't
|
||||
# yet contain logic to expand into further streams.
|
||||
|
||||
# The software version variable is now held in shared.py
|
||||
|
||||
import os
|
||||
import sys
|
||||
import ctypes
|
||||
|
@ -26,41 +23,28 @@ import time
|
|||
import traceback
|
||||
from struct import pack
|
||||
|
||||
from helper_startup import (
|
||||
isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections
|
||||
)
|
||||
from singleinstance import singleinstance
|
||||
|
||||
import defaults
|
||||
import depends
|
||||
import shared
|
||||
import knownnodes
|
||||
import state
|
||||
import shutdown
|
||||
from debug import logger
|
||||
|
||||
# Classes
|
||||
from class_sqlThread import sqlThread
|
||||
from class_singleCleaner import singleCleaner
|
||||
from class_objectProcessor import objectProcessor
|
||||
from class_singleWorker import singleWorker
|
||||
from class_addressGenerator import addressGenerator
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
||||
from debug import logger # this should go before any threads
|
||||
from helper_startup import (
|
||||
isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections
|
||||
)
|
||||
from inventory import Inventory
|
||||
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.dandelion import Dandelion
|
||||
from network.networkthread import BMNetworkThread
|
||||
from network.receivequeuethread import ReceiveQueueThread
|
||||
from network.announcethread import AnnounceThread
|
||||
from network.invthread import InvThread
|
||||
from network.addrthread import AddrThread
|
||||
from network.downloadthread import DownloadThread
|
||||
from network.uploadthread import UploadThread
|
||||
|
||||
# Helper Functions
|
||||
import helper_threading
|
||||
from knownnodes import readKnownNodes
|
||||
# Network objects and threads
|
||||
from network import (
|
||||
BMConnectionPool, Dandelion,
|
||||
AddrThread, AnnounceThread, BMNetworkThread, InvThread, ReceiveQueueThread,
|
||||
DownloadThread, UploadThread)
|
||||
from singleinstance import singleinstance
|
||||
# Synchronous threads
|
||||
from threads import (
|
||||
set_thread_name,
|
||||
addressGenerator, objectProcessor, singleCleaner, singleWorker, sqlThread)
|
||||
|
||||
app_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
os.chdir(app_dir)
|
||||
|
@ -70,7 +54,7 @@ depends.check_dependencies()
|
|||
|
||||
|
||||
def connectToStream(streamNumber):
|
||||
"""Method helps us to connect with the stream"""
|
||||
"""Connect to a stream"""
|
||||
state.streamsInWhichIAmParticipating.append(streamNumber)
|
||||
|
||||
if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():
|
||||
|
@ -87,14 +71,6 @@ def connectToStream(streamNumber):
|
|||
except:
|
||||
pass
|
||||
|
||||
with knownnodes.knownNodesLock:
|
||||
if streamNumber not in knownnodes.knownNodes:
|
||||
knownnodes.knownNodes[streamNumber] = {}
|
||||
if streamNumber * 2 not in knownnodes.knownNodes:
|
||||
knownnodes.knownNodes[streamNumber * 2] = {}
|
||||
if streamNumber * 2 + 1 not in knownnodes.knownNodes:
|
||||
knownnodes.knownNodes[streamNumber * 2 + 1] = {}
|
||||
|
||||
BMConnectionPool().connectToStream(streamNumber)
|
||||
|
||||
|
||||
|
@ -111,7 +87,7 @@ def _fixSocket():
|
|||
addressToString = ctypes.windll.ws2_32.WSAAddressToStringA
|
||||
|
||||
def inet_ntop(family, host):
|
||||
"""Method converts an IP address in packed binary format to string format"""
|
||||
"""Converting an IP address in packed binary format to string format"""
|
||||
if family == socket.AF_INET:
|
||||
if len(host) != 4:
|
||||
raise ValueError("invalid IPv4 host")
|
||||
|
@ -133,7 +109,7 @@ def _fixSocket():
|
|||
stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
|
||||
|
||||
def inet_pton(family, host):
|
||||
"""Method converts an IP address in string format to a packed binary format"""
|
||||
"""Converting an IP address in string format to a packed binary format"""
|
||||
buf = "\0" * 28
|
||||
lengthBuf = pack("I", len(buf))
|
||||
if stringToAddress(str(host),
|
||||
|
@ -188,9 +164,8 @@ def signal_handler(signum, frame):
|
|||
because the UI captures the signal.'
|
||||
|
||||
|
||||
class Main: # pylint: disable=no-init, old-style-class
|
||||
"""Method starts the proxy config plugin"""
|
||||
|
||||
class Main(object):
|
||||
"""Main PyBitmessage class"""
|
||||
@staticmethod
|
||||
def start_proxyconfig(config):
|
||||
"""Check socksproxytype and start any proxy configuration plugin"""
|
||||
|
@ -213,15 +188,15 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
'Started proxy config plugin %s in %s sec',
|
||||
proxy_type, time.time() - proxyconfig_start)
|
||||
|
||||
def start(self): # pylint: disable=too-many-statements, too-many-branches, too-many-locals
|
||||
"""This method helps to start the daemon"""
|
||||
def start(self): # pylint: disable=too-many-statements, too-many-branches, too-many-locals
|
||||
"""Start main application"""
|
||||
_fixSocket()
|
||||
|
||||
config = BMConfigParser()
|
||||
daemon = config.safeGetBoolean('bitmessagesettings', 'daemon')
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(
|
||||
opts, _ = getopt.getopt(
|
||||
sys.argv[1:], "hcdt",
|
||||
["help", "curses", "daemon", "test"])
|
||||
|
||||
|
@ -229,7 +204,7 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
self.usage()
|
||||
sys.exit(2)
|
||||
|
||||
for opt, arg in opts:
|
||||
for opt, _ in opts:
|
||||
if opt in ("-h", "--help"):
|
||||
self.usage()
|
||||
sys.exit()
|
||||
|
@ -287,7 +262,7 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
|
||||
self.setSignalHandler()
|
||||
|
||||
helper_threading.set_thread_name("PyBitmessage")
|
||||
set_thread_name("PyBitmessage")
|
||||
|
||||
state.dandelion = config.safeGetInt('network', 'dandelion')
|
||||
# dandelion requires outbound connections, without them,
|
||||
|
@ -302,7 +277,10 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
defaults.networkDefaultProofOfWorkNonceTrialsPerByte / 100)
|
||||
defaults.networkDefaultPayloadLengthExtraBytes = int(
|
||||
defaults.networkDefaultPayloadLengthExtraBytes / 100)
|
||||
knownnodes.readKnownNodes()
|
||||
|
||||
readKnownNodes()
|
||||
|
||||
|
||||
# Not needed if objproc is disabled
|
||||
if state.enableObjProc:
|
||||
|
||||
|
@ -449,7 +427,8 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
# wait until grandchild ready
|
||||
while True:
|
||||
time.sleep(1)
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
except AttributeError:
|
||||
# fork not implemented
|
||||
pass
|
||||
|
@ -470,7 +449,8 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
# wait until child ready
|
||||
while True:
|
||||
time.sleep(1)
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
except AttributeError:
|
||||
# fork not implemented
|
||||
pass
|
||||
|
@ -491,7 +471,8 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
os.kill(parentPid, signal.SIGTERM)
|
||||
os.kill(grandfatherPid, signal.SIGTERM)
|
||||
|
||||
def setSignalHandler(self): # pylint: disable=no-self-use
|
||||
@staticmethod
|
||||
def setSignalHandler():
|
||||
"""Setting the Signal Handler"""
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
@ -499,9 +480,9 @@ class Main: # pylint: disable=no-init, old-style-class
|
|||
|
||||
@staticmethod
|
||||
def usage():
|
||||
"""After passing argument, method displays the usages"""
|
||||
print 'Usage: ' + sys.argv[0] + ' [OPTIONS]'
|
||||
print '''
|
||||
"""Displaying the usages"""
|
||||
print('Usage: ' + sys.argv[0] + ' [OPTIONS]')
|
||||
print('''
|
||||
Options:
|
||||
-h, --help show this help message and exit
|
||||
-c, --curses use curses (text mode) interface
|
||||
|
@ -509,17 +490,19 @@ Options:
|
|||
-t, --test dryrun, make testing
|
||||
|
||||
All parameters are optional.
|
||||
'''
|
||||
''')
|
||||
|
||||
def stop(self): # pylint: disable=no-self-use
|
||||
"""Method helps to stop the Bitmessage Deamon"""
|
||||
@staticmethod
|
||||
def stop():
|
||||
"""Stop main application"""
|
||||
with shared.printLock:
|
||||
print 'Stopping Bitmessage Deamon.'
|
||||
shutdown.doCleanShutdown()
|
||||
|
||||
# ..todo: nice function but no one is using this
|
||||
def getApiAddress(self): # pylint: disable=no-self-use
|
||||
"""This method returns the Api Addresses"""
|
||||
# .. todo:: nice function but no one is using this
|
||||
@staticmethod
|
||||
def getApiAddress():
|
||||
"""This function returns API address and port"""
|
||||
if not BMConfigParser().safeGetBoolean(
|
||||
'bitmessagesettings', 'apienabled'):
|
||||
return None
|
||||
|
@ -529,7 +512,7 @@ All parameters are optional.
|
|||
|
||||
|
||||
def main():
|
||||
"""Start of the main thread"""
|
||||
"""Triggers main module"""
|
||||
mainprogram = Main()
|
||||
mainprogram.start()
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ import network.stats
|
|||
import shared
|
||||
import widgets
|
||||
from inventory import Inventory
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network import BMConnectionPool
|
||||
from retranslateui import RetranslateMixin
|
||||
from tr import _translate
|
||||
from uisignaler import UISignaler
|
||||
|
|
|
@ -99,6 +99,8 @@ class SettingsDialog(QtGui.QDialog):
|
|||
config.getboolean('bitmessagesettings', 'socksauthentication'))
|
||||
self.checkBoxSocksListen.setChecked(
|
||||
config.getboolean('bitmessagesettings', 'sockslisten'))
|
||||
self.checkBoxOnionOnly.setChecked(
|
||||
config.safeGetBoolean('bitmessagesettings', 'onionservicesonly'))
|
||||
|
||||
proxy_type = config.safeGet(
|
||||
'bitmessagesettings', 'socksproxytype', 'none')
|
||||
|
@ -110,6 +112,7 @@ class SettingsDialog(QtGui.QDialog):
|
|||
self.lineEditSocksPassword.setEnabled(False)
|
||||
self.checkBoxAuthentication.setEnabled(False)
|
||||
self.checkBoxSocksListen.setEnabled(False)
|
||||
self.checkBoxOnionOnly.setEnabled(False)
|
||||
elif proxy_type == 'SOCKS4a':
|
||||
self.comboBoxProxyType.setCurrentIndex(1)
|
||||
elif proxy_type == 'SOCKS5':
|
||||
|
@ -200,11 +203,13 @@ class SettingsDialog(QtGui.QDialog):
|
|||
self.lineEditSocksPassword.setEnabled(False)
|
||||
self.checkBoxAuthentication.setEnabled(False)
|
||||
self.checkBoxSocksListen.setEnabled(False)
|
||||
self.checkBoxOnionOnly.setEnabled(False)
|
||||
elif comboBoxIndex in (1, 2):
|
||||
self.lineEditSocksHostname.setEnabled(True)
|
||||
self.lineEditSocksPort.setEnabled(True)
|
||||
self.checkBoxAuthentication.setEnabled(True)
|
||||
self.checkBoxSocksListen.setEnabled(True)
|
||||
self.checkBoxOnionOnly.setEnabled(True)
|
||||
if self.checkBoxAuthentication.isChecked():
|
||||
self.lineEditSocksUsername.setEnabled(True)
|
||||
self.lineEditSocksPassword.setEnabled(True)
|
||||
|
@ -334,6 +339,11 @@ class SettingsDialog(QtGui.QDialog):
|
|||
self.lineEditSocksPassword.text()))
|
||||
self.config.set('bitmessagesettings', 'sockslisten', str(
|
||||
self.checkBoxSocksListen.isChecked()))
|
||||
if self.checkBoxOnionOnly.isChecked() \
|
||||
and not self.config.safeGetBoolean('bitmessagesettings', 'onionservicesonly'):
|
||||
self.net_restart_needed = True
|
||||
self.config.set('bitmessagesettings', 'onionservicesonly', str(
|
||||
self.checkBoxOnionOnly.isChecked()))
|
||||
try:
|
||||
# Rounding to integers just for aesthetics
|
||||
self.config.set('bitmessagesettings', 'maxdownloadrate', str(
|
||||
|
|
|
@ -403,6 +403,13 @@
|
|||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="4" column="1" colspan="4">
|
||||
<widget class="QCheckBox" name="checkBoxOnionOnly">
|
||||
<property name="text">
|
||||
<string>Only connect to onion services (*.onion)</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="0" column="1">
|
||||
<widget class="QComboBox" name="comboBoxProxyType">
|
||||
<item>
|
||||
|
|
|
@ -43,8 +43,10 @@ BMConfigDefaults = {
|
|||
|
||||
@Singleton
|
||||
class BMConfigParser(ConfigParser.SafeConfigParser):
|
||||
"""Singleton class inherited from ConfigParser.SafeConfigParser
|
||||
with additional methods specific to bitmessage config."""
|
||||
"""
|
||||
Singleton class inherited from :class:`ConfigParser.SafeConfigParser`
|
||||
with additional methods specific to bitmessage config.
|
||||
"""
|
||||
|
||||
_temp = {}
|
||||
|
||||
|
@ -56,7 +58,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
raise ValueError("Invalid value %s" % value)
|
||||
return ConfigParser.ConfigParser.set(self, section, option, value)
|
||||
|
||||
def get(self, section, option, raw=False, variables=None):
|
||||
def get(self, section, option, raw=False, variables=None): # pylint: disable=arguments-differ
|
||||
try:
|
||||
if section == "bitmessagesettings" and option == "timeformat":
|
||||
return ConfigParser.ConfigParser.get(
|
||||
|
@ -84,6 +86,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
self._temp[section] = {option: value}
|
||||
|
||||
def safeGetBoolean(self, section, field):
|
||||
"""Return value as boolean, False on exceptions"""
|
||||
try:
|
||||
return self.getboolean(section, field)
|
||||
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError,
|
||||
|
@ -91,6 +94,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
return False
|
||||
|
||||
def safeGetInt(self, section, field, default=0):
|
||||
"""Return value as integer, default on exceptions, 0 if default missing"""
|
||||
try:
|
||||
return self.getint(section, field)
|
||||
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError,
|
||||
|
@ -98,18 +102,22 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
return default
|
||||
|
||||
def safeGet(self, section, option, default=None):
|
||||
"""Return value as is, default on exceptions, None if default missing"""
|
||||
try:
|
||||
return self.get(section, option)
|
||||
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError,
|
||||
ValueError, AttributeError):
|
||||
return default
|
||||
|
||||
def items(self, section, raw=False, variables=None):
|
||||
def items(self, section, raw=False, variables=None): # pylint: disable=arguments-differ
|
||||
"""Return section variables as parent, but override the "raw" argument to always True"""
|
||||
return ConfigParser.ConfigParser.items(self, section, True, variables)
|
||||
|
||||
def addresses(self):
|
||||
return filter(
|
||||
lambda x: x.startswith('BM-'), BMConfigParser().sections())
|
||||
@staticmethod
|
||||
def addresses():
|
||||
"""Return a list of local bitmessage addresses (from section labels)"""
|
||||
return [
|
||||
x for x in BMConfigParser().sections() if x.startswith('BM-')]
|
||||
|
||||
def read(self, filenames):
|
||||
ConfigParser.ConfigParser.read(self, filenames)
|
||||
|
@ -130,6 +138,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
continue
|
||||
|
||||
def save(self):
|
||||
"""Save the runtime config onto the filesystem"""
|
||||
fileName = os.path.join(state.appdata, 'keys.dat')
|
||||
fileNameBak = '.'.join([
|
||||
fileName, datetime.now().strftime("%Y%j%H%M%S%f"), 'bak'])
|
||||
|
@ -151,12 +160,15 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
os.remove(fileNameBak)
|
||||
|
||||
def validate(self, section, option, value):
|
||||
"""Input validator interface (using factory pattern)"""
|
||||
try:
|
||||
return getattr(self, 'validate_%s_%s' % (section, option))(value)
|
||||
except AttributeError:
|
||||
return True
|
||||
|
||||
def validate_bitmessagesettings_maxoutboundconnections(self, value):
|
||||
@staticmethod
|
||||
def validate_bitmessagesettings_maxoutboundconnections(value):
|
||||
"""Reject maxoutboundconnections that are too high or too low"""
|
||||
try:
|
||||
value = int(value)
|
||||
except ValueError:
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
|
||||
"""
|
||||
A thread for creating addresses
|
||||
"""
|
||||
import time
|
||||
import hashlib
|
||||
from binascii import hexlify
|
||||
|
@ -11,13 +13,13 @@ import shared
|
|||
import defaults
|
||||
import highlevelcrypto
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from addresses import decodeAddress, encodeAddress, encodeVarint
|
||||
from fallback import RIPEMD160Hash
|
||||
from helper_threading import StoppableThread
|
||||
from network import StoppableThread
|
||||
|
||||
|
||||
class addressGenerator(StoppableThread):
|
||||
"""A thread for creating addresses"""
|
||||
|
||||
name = "addressGenerator"
|
||||
|
||||
|
@ -29,7 +31,11 @@ class addressGenerator(StoppableThread):
|
|||
super(addressGenerator, self).stopThread()
|
||||
|
||||
def run(self):
|
||||
|
||||
"""
|
||||
Process the requests for addresses generation
|
||||
from `.queues.addressGeneratorQueue`
|
||||
"""
|
||||
# pylint: disable=too-many-locals, too-many-branches, protected-access, too-many-statements
|
||||
while state.shutdown == 0:
|
||||
queueValue = queues.addressGeneratorQueue.get()
|
||||
nonceTrialsPerByte = 0
|
||||
|
@ -85,12 +91,12 @@ class addressGenerator(StoppableThread):
|
|||
elif queueValue[0] == 'stopThread':
|
||||
break
|
||||
else:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Programming error: A structure with the wrong number'
|
||||
' of values was passed into the addressGeneratorQueue.'
|
||||
' Here is the queueValue: %r\n', queueValue)
|
||||
if addressVersionNumber < 3 or addressVersionNumber > 4:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Program error: For some reason the address generator'
|
||||
' queue has been given a request to create at least'
|
||||
' one version %s address which it cannot do.\n',
|
||||
|
@ -137,10 +143,10 @@ class addressGenerator(StoppableThread):
|
|||
'\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash
|
||||
):
|
||||
break
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Generated address with ripe digest: %s', hexlify(ripe))
|
||||
try:
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Address generator calculated %s addresses at %s'
|
||||
' addresses per second before finding one with'
|
||||
' the correct ripe-prefix.',
|
||||
|
@ -202,8 +208,8 @@ class addressGenerator(StoppableThread):
|
|||
elif command == 'createDeterministicAddresses' \
|
||||
or command == 'getDeterministicAddress' \
|
||||
or command == 'createChan' or command == 'joinChan':
|
||||
if len(deterministicPassphrase) == 0:
|
||||
logger.warning(
|
||||
if not deterministicPassphrase:
|
||||
self.logger.warning(
|
||||
'You are creating deterministic'
|
||||
' address(es) using a blank passphrase.'
|
||||
' Bitmessage will do it but it is rather stupid.')
|
||||
|
@ -256,10 +262,10 @@ class addressGenerator(StoppableThread):
|
|||
):
|
||||
break
|
||||
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Generated address with ripe digest: %s', hexlify(ripe))
|
||||
try:
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Address generator calculated %s addresses'
|
||||
' at %s addresses per second before finding'
|
||||
' one with the correct ripe-prefix.',
|
||||
|
@ -309,7 +315,7 @@ class addressGenerator(StoppableThread):
|
|||
addressAlreadyExists = True
|
||||
|
||||
if addressAlreadyExists:
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'%s already exists. Not adding it again.',
|
||||
address
|
||||
)
|
||||
|
@ -322,7 +328,7 @@ class addressGenerator(StoppableThread):
|
|||
).arg(address)
|
||||
))
|
||||
else:
|
||||
logger.debug('label: %s', label)
|
||||
self.logger.debug('label: %s', label)
|
||||
BMConfigParser().set(address, 'label', label)
|
||||
BMConfigParser().set(address, 'enabled', 'true')
|
||||
BMConfigParser().set(address, 'decoy', 'false')
|
||||
|
@ -351,7 +357,7 @@ class addressGenerator(StoppableThread):
|
|||
address)
|
||||
shared.myECCryptorObjects[ripe] = \
|
||||
highlevelcrypto.makeCryptor(
|
||||
hexlify(potentialPrivEncryptionKey))
|
||||
hexlify(potentialPrivEncryptionKey))
|
||||
shared.myAddressesByHash[ripe] = address
|
||||
tag = hashlib.sha512(hashlib.sha512(
|
||||
encodeVarint(addressVersionNumber) +
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
"""
|
||||
The objectProcessor thread, of which there is only one, processes the network objects
|
||||
"""
|
||||
import hashlib
|
||||
import logging
|
||||
import random
|
||||
import shared
|
||||
import threading
|
||||
import time
|
||||
from binascii import hexlify
|
||||
|
@ -8,11 +11,13 @@ from subprocess import call # nosec
|
|||
|
||||
import highlevelcrypto
|
||||
import knownnodes
|
||||
import shared
|
||||
from addresses import (
|
||||
calculateInventoryHash, decodeAddress, decodeVarint, encodeAddress,
|
||||
encodeVarint, varintDecodeError
|
||||
)
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
||||
import helper_bitcoin
|
||||
import helper_inbox
|
||||
import helper_msgcoding
|
||||
|
@ -20,13 +25,18 @@ import helper_sent
|
|||
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery
|
||||
from helper_ackPayload import genAckPayload
|
||||
from network import bmproto
|
||||
from network.node import Peer
|
||||
|
||||
import protocol
|
||||
import queues
|
||||
import state
|
||||
import tr
|
||||
from debug import logger
|
||||
from fallback import RIPEMD160Hash
|
||||
|
||||
import l10n
|
||||
# pylint: disable=too-many-locals, too-many-return-statements, too-many-branches, too-many-statements
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class objectProcessor(threading.Thread):
|
||||
|
@ -55,6 +65,7 @@ class objectProcessor(threading.Thread):
|
|||
self.successfullyDecryptMessageTimings = []
|
||||
|
||||
def run(self):
|
||||
"""Process the objects from `.queues.objectProcessorQueue`"""
|
||||
while True:
|
||||
objectType, data = queues.objectProcessorQueue.get()
|
||||
|
||||
|
@ -118,7 +129,10 @@ class objectProcessor(threading.Thread):
|
|||
state.shutdown = 2
|
||||
break
|
||||
|
||||
def checkackdata(self, data):
|
||||
@staticmethod
|
||||
def checkackdata(data):
|
||||
"""Checking Acknowledgement of message received or not?"""
|
||||
# pylint: disable=protected-access
|
||||
# Let's check whether this is a message acknowledgement bound for us.
|
||||
if len(data) < 32:
|
||||
return
|
||||
|
@ -158,7 +172,7 @@ class objectProcessor(threading.Thread):
|
|||
|
||||
if not host:
|
||||
return
|
||||
peer = state.Peer(host, port)
|
||||
peer = Peer(host, port)
|
||||
with knownnodes.knownNodesLock:
|
||||
knownnodes.addKnownNode(
|
||||
stream, peer, is_self=state.ownAddresses.get(peer))
|
||||
|
@ -268,6 +282,7 @@ class objectProcessor(threading.Thread):
|
|||
queues.workerQueue.put(('sendOutOrStoreMyV4Pubkey', myAddress))
|
||||
|
||||
def processpubkey(self, data):
|
||||
"""Process a pubkey object"""
|
||||
pubkeyProcessingStartTime = time.time()
|
||||
shared.numberOfPubkeysProcessed += 1
|
||||
queues.UISignalQueue.put((
|
||||
|
@ -316,13 +331,14 @@ class objectProcessor(threading.Thread):
|
|||
'\x04' + publicSigningKey + '\x04' + publicEncryptionKey)
|
||||
ripe = RIPEMD160Hash(sha.digest()).digest()
|
||||
|
||||
logger.debug(
|
||||
'within recpubkey, addressVersion: %s, streamNumber: %s'
|
||||
'\nripe %s\npublicSigningKey in hex: %s'
|
||||
'\npublicEncryptionKey in hex: %s',
|
||||
addressVersion, streamNumber, hexlify(ripe),
|
||||
hexlify(publicSigningKey), hexlify(publicEncryptionKey)
|
||||
)
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
'within recpubkey, addressVersion: %s, streamNumber: %s'
|
||||
'\nripe %s\npublicSigningKey in hex: %s'
|
||||
'\npublicEncryptionKey in hex: %s',
|
||||
addressVersion, streamNumber, hexlify(ripe),
|
||||
hexlify(publicSigningKey), hexlify(publicEncryptionKey)
|
||||
)
|
||||
|
||||
address = encodeAddress(addressVersion, streamNumber, ripe)
|
||||
|
||||
|
@ -380,13 +396,14 @@ class objectProcessor(threading.Thread):
|
|||
sha.update(publicSigningKey + publicEncryptionKey)
|
||||
ripe = RIPEMD160Hash(sha.digest()).digest()
|
||||
|
||||
logger.debug(
|
||||
'within recpubkey, addressVersion: %s, streamNumber: %s'
|
||||
'\nripe %s\npublicSigningKey in hex: %s'
|
||||
'\npublicEncryptionKey in hex: %s',
|
||||
addressVersion, streamNumber, hexlify(ripe),
|
||||
hexlify(publicSigningKey), hexlify(publicEncryptionKey)
|
||||
)
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
'within recpubkey, addressVersion: %s, streamNumber: %s'
|
||||
'\nripe %s\npublicSigningKey in hex: %s'
|
||||
'\npublicEncryptionKey in hex: %s',
|
||||
addressVersion, streamNumber, hexlify(ripe),
|
||||
hexlify(publicSigningKey), hexlify(publicEncryptionKey)
|
||||
)
|
||||
|
||||
address = encodeAddress(addressVersion, streamNumber, ripe)
|
||||
queryreturn = sqlQuery(
|
||||
|
@ -438,6 +455,7 @@ class objectProcessor(threading.Thread):
|
|||
timeRequiredToProcessPubkey)
|
||||
|
||||
def processmsg(self, data):
|
||||
"""Process a message object"""
|
||||
messageProcessingStartTime = time.time()
|
||||
shared.numberOfMessagesProcessed += 1
|
||||
queues.UISignalQueue.put((
|
||||
|
@ -579,17 +597,18 @@ class objectProcessor(threading.Thread):
|
|||
logger.debug('ECDSA verify failed')
|
||||
return
|
||||
logger.debug('ECDSA verify passed')
|
||||
logger.debug(
|
||||
'As a matter of intellectual curiosity, here is the Bitcoin'
|
||||
' address associated with the keys owned by the other person:'
|
||||
' %s ..and here is the testnet address: %s. The other person'
|
||||
' must take their private signing key from Bitmessage and'
|
||||
' import it into Bitcoin (or a service like Blockchain.info)'
|
||||
' for it to be of any use. Do not use this unless you know'
|
||||
' what you are doing.',
|
||||
helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey),
|
||||
helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey)
|
||||
)
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
'As a matter of intellectual curiosity, here is the Bitcoin'
|
||||
' address associated with the keys owned by the other person:'
|
||||
' %s ..and here is the testnet address: %s. The other person'
|
||||
' must take their private signing key from Bitmessage and'
|
||||
' import it into Bitcoin (or a service like Blockchain.info)'
|
||||
' for it to be of any use. Do not use this unless you know'
|
||||
' what you are doing.',
|
||||
helper_bitcoin.calculateBitcoinAddressFromPubkey(pubSigningKey),
|
||||
helper_bitcoin.calculateTestnetAddressFromPubkey(pubSigningKey)
|
||||
)
|
||||
# Used to detect and ignore duplicate messages in our inbox
|
||||
sigHash = hashlib.sha512(
|
||||
hashlib.sha512(signature).digest()).digest()[32:]
|
||||
|
@ -732,7 +751,7 @@ class objectProcessor(threading.Thread):
|
|||
# We really should have a discussion about how to
|
||||
# set the TTL for mailing list broadcasts. This is obviously
|
||||
# hard-coded.
|
||||
TTL = 2*7*24*60*60 # 2 weeks
|
||||
TTL = 2 * 7 * 24 * 60 * 60 # 2 weeks
|
||||
t = ('',
|
||||
toAddress,
|
||||
ripe,
|
||||
|
@ -784,6 +803,7 @@ class objectProcessor(threading.Thread):
|
|||
)
|
||||
|
||||
def processbroadcast(self, data):
|
||||
"""Process a broadcast object"""
|
||||
messageProcessingStartTime = time.time()
|
||||
shared.numberOfBroadcastsProcessed += 1
|
||||
queues.UISignalQueue.put((
|
||||
|
@ -968,7 +988,7 @@ class objectProcessor(threading.Thread):
|
|||
|
||||
fromAddress = encodeAddress(
|
||||
sendersAddressVersion, sendersStream, calculatedRipe)
|
||||
logger.info('fromAddress: %s' % fromAddress)
|
||||
logger.info('fromAddress: %s', fromAddress)
|
||||
|
||||
# Let's store the public key in case we want to reply to this person.
|
||||
sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
|
||||
|
@ -985,7 +1005,7 @@ class objectProcessor(threading.Thread):
|
|||
|
||||
fromAddress = encodeAddress(
|
||||
sendersAddressVersion, sendersStream, calculatedRipe)
|
||||
logger.debug('fromAddress: ' + fromAddress)
|
||||
logger.debug('fromAddress: %s', fromAddress)
|
||||
|
||||
try:
|
||||
decodedMessage = helper_msgcoding.MsgDecode(
|
||||
|
@ -1046,17 +1066,18 @@ class objectProcessor(threading.Thread):
|
|||
# for it.
|
||||
elif addressVersion >= 4:
|
||||
tag = hashlib.sha512(hashlib.sha512(
|
||||
encodeVarint(addressVersion) + encodeVarint(streamNumber) + ripe
|
||||
encodeVarint(addressVersion) + encodeVarint(streamNumber)
|
||||
+ ripe
|
||||
).digest()).digest()[32:]
|
||||
if tag in state.neededPubkeys:
|
||||
del state.neededPubkeys[tag]
|
||||
self.sendMessages(address)
|
||||
|
||||
def sendMessages(self, address):
|
||||
@staticmethod
|
||||
def sendMessages(address):
|
||||
"""
|
||||
This function is called by the possibleNewPubkey function when
|
||||
that function sees that we now have the necessary pubkey
|
||||
to send one or more messages.
|
||||
This method is called by the `possibleNewPubkey` when it sees
|
||||
that we now have the necessary pubkey to send one or more messages.
|
||||
"""
|
||||
logger.info('We have been awaiting the arrival of this pubkey.')
|
||||
sqlExecute(
|
||||
|
@ -1066,7 +1087,9 @@ class objectProcessor(threading.Thread):
|
|||
" AND folder='sent'", address)
|
||||
queues.workerQueue.put(('sendmessage', ''))
|
||||
|
||||
def ackDataHasAValidHeader(self, ackData):
|
||||
@staticmethod
|
||||
def ackDataHasAValidHeader(ackData):
|
||||
"""Checking ackData with valid Header, not sending ackData when false"""
|
||||
if len(ackData) < protocol.Header.size:
|
||||
logger.info(
|
||||
'The length of ackData is unreasonably short. Not sending'
|
||||
|
@ -1101,11 +1124,12 @@ class objectProcessor(threading.Thread):
|
|||
return False
|
||||
return True
|
||||
|
||||
def addMailingListNameToSubject(self, subject, mailingListName):
|
||||
@staticmethod
|
||||
def addMailingListNameToSubject(subject, mailingListName):
|
||||
"""Adding mailingListName to subject"""
|
||||
subject = subject.strip()
|
||||
if subject[:3] == 'Re:' or subject[:3] == 'RE:':
|
||||
subject = subject[3:].strip()
|
||||
if '[' + mailingListName + ']' in subject:
|
||||
return subject
|
||||
else:
|
||||
return '[' + mailingListName + '] ' + subject
|
||||
return '[' + mailingListName + '] ' + subject
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
import Queue
|
||||
import threading
|
||||
import time
|
||||
|
||||
class ObjectProcessorQueue(Queue.Queue):
|
||||
maxSize = 32000000
|
||||
|
||||
def __init__(self):
|
||||
Queue.Queue.__init__(self)
|
||||
self.sizeLock = threading.Lock()
|
||||
self.curSize = 0 # in Bytes. We maintain this to prevent nodes from flooing us with objects which take up too much memory. If this gets too big we'll sleep before asking for further objects.
|
||||
|
||||
def put(self, item, block = True, timeout = None):
|
||||
while self.curSize >= self.maxSize:
|
||||
time.sleep(1)
|
||||
with self.sizeLock:
|
||||
self.curSize += len(item[1])
|
||||
Queue.Queue.put(self, item, block, timeout)
|
||||
|
||||
def get(self, block = True, timeout = None):
|
||||
item = Queue.Queue.get(self, block, timeout)
|
||||
with self.sizeLock:
|
||||
self.curSize -= len(item[1])
|
||||
return item
|
|
@ -1,20 +1,21 @@
|
|||
"""
|
||||
The singleCleaner class is a timer-driven thread that cleans data structures
|
||||
The `singleCleaner` class is a timer-driven thread that cleans data structures
|
||||
to free memory, resends messages when a remote node doesn't respond, and
|
||||
sends pong messages to keep connections alive if the network isn't busy.
|
||||
|
||||
It cleans these data structures in memory:
|
||||
inventory (moves data to the on-disk sql database)
|
||||
inventorySets (clears then reloads data out of sql database)
|
||||
- inventory (moves data to the on-disk sql database)
|
||||
- inventorySets (clears then reloads data out of sql database)
|
||||
|
||||
It cleans these tables on the disk:
|
||||
inventory (clears expired objects)
|
||||
pubkeys (clears pubkeys older than 4 weeks old which we have not used
|
||||
personally)
|
||||
knownNodes (clears addresses which have not been online for over 3 days)
|
||||
- inventory (clears expired objects)
|
||||
- pubkeys (clears pubkeys older than 4 weeks old which we have not used
|
||||
personally)
|
||||
- knownNodes (clears addresses which have not been online for over 3 days)
|
||||
|
||||
It resends messages when there has been no response:
|
||||
resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
|
||||
resends msg messages in 5 days (then 10 days, then 20 days, etc...)
|
||||
- resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
|
||||
- resends msg messages in 5 days (then 10 days, then 20 days, etc...)
|
||||
|
||||
"""
|
||||
# pylint: disable=relative-import, protected-access
|
||||
|
@ -23,36 +24,35 @@ import os
|
|||
import time
|
||||
import shared
|
||||
|
||||
import knownnodes
|
||||
import queues
|
||||
import shared
|
||||
import state
|
||||
import tr
|
||||
from bmconfigparser import BMConfigParser
|
||||
from helper_sql import sqlQuery, sqlExecute
|
||||
from helper_threading import StoppableThread
|
||||
from inventory import Inventory
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from debug import logger
|
||||
from datetime import datetime, timedelta
|
||||
import knownnodes
|
||||
import queues
|
||||
import state
|
||||
from network import BMConnectionPool, StoppableThread
|
||||
|
||||
|
||||
class singleCleaner(StoppableThread):
|
||||
"""Base method that Cleanup knownnodes and handle possible severe exception"""
|
||||
"""The singleCleaner thread class"""
|
||||
name = "singleCleaner"
|
||||
cycleLength = 300
|
||||
expireDiscoveredPeers = 300
|
||||
|
||||
def run(self): # pylint: disable=too-many-branches
|
||||
def run(self): # pylint: disable=too-many-branches
|
||||
gc.disable()
|
||||
timeWeLastClearedInventoryAndPubkeysTables = 0
|
||||
try:
|
||||
shared.maximumLengthOfTimeToBotherResendingMessages = (
|
||||
float(BMConfigParser().get(
|
||||
'bitmessagesettings', 'stopresendingafterxdays')) *
|
||||
24 * 60 * 60
|
||||
'bitmessagesettings', 'stopresendingafterxdays'))
|
||||
* 24 * 60 * 60
|
||||
) + (
|
||||
float(BMConfigParser().get(
|
||||
'bitmessagesettings', 'stopresendingafterxmonths')) *
|
||||
(60 * 60 * 24 * 365) / 12)
|
||||
'bitmessagesettings', 'stopresendingafterxmonths'))
|
||||
* (60 * 60 * 24 * 365) / 12)
|
||||
except:
|
||||
# Either the user hasn't set stopresendingafterxdays and
|
||||
# stopresendingafterxmonths yet or the options are missing
|
||||
|
@ -93,13 +93,13 @@ class singleCleaner(StoppableThread):
|
|||
queryreturn = sqlQuery(
|
||||
"SELECT toaddress, ackdata, status FROM sent"
|
||||
" WHERE ((status='awaitingpubkey' OR status='msgsent')"
|
||||
" AND folder LIKE '%sent%' AND sleeptill<? AND senttime>?)",
|
||||
int(time.time()), int(time.time()) -
|
||||
shared.maximumLengthOfTimeToBotherResendingMessages
|
||||
" AND folder='sent' AND sleeptill<? AND senttime>?)",
|
||||
int(time.time()), int(time.time())
|
||||
- shared.maximumLengthOfTimeToBotherResendingMessages
|
||||
)
|
||||
for row in queryreturn:
|
||||
if len(row) < 2:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Something went wrong in the singleCleaner thread:'
|
||||
' a query did not return the requested fields. %r',
|
||||
row
|
||||
|
@ -108,17 +108,18 @@ class singleCleaner(StoppableThread):
|
|||
break
|
||||
toAddress, ackData, status = row
|
||||
if status == 'awaitingpubkey':
|
||||
resendPubkeyRequest(toAddress)
|
||||
self.resendPubkeyRequest(toAddress)
|
||||
elif status == 'msgsent':
|
||||
resendMsg(ackData)
|
||||
self.resendMsg(ackData)
|
||||
deleteTrashMsgPermonantly()
|
||||
try:
|
||||
# Cleanup knownnodes and handle possible severe exception
|
||||
# while writing it to disk
|
||||
knownnodes.cleanupKnownNodes()
|
||||
except Exception as err:
|
||||
# pylint: disable=protected-access
|
||||
if "Errno 28" in str(err):
|
||||
logger.fatal(
|
||||
self.logger.fatal(
|
||||
'(while writing knownnodes to disk)'
|
||||
' Alert: Your disk or data storage volume is full.'
|
||||
)
|
||||
|
@ -131,20 +132,11 @@ class singleCleaner(StoppableThread):
|
|||
' is full. Bitmessage will now exit.'),
|
||||
True)
|
||||
))
|
||||
# ..FIXME redundant?
|
||||
# pylint: disable=no-member
|
||||
if shared.daemon or not state.enableGUI:
|
||||
if shared.thisapp.daemon or not state.enableGUI:
|
||||
os._exit(1)
|
||||
|
||||
# # clear download queues
|
||||
# for thread in threading.enumerate():
|
||||
# if thread.isAlive() and hasattr(thread, 'downloadQueue'):
|
||||
# thread.downloadQueue.clear()
|
||||
|
||||
# inv/object tracking
|
||||
for connection in \
|
||||
BMConnectionPool().inboundConnections.values() + \
|
||||
BMConnectionPool().outboundConnections.values():
|
||||
for connection in BMConnectionPool().connections():
|
||||
connection.clean()
|
||||
|
||||
# discovery tracking
|
||||
|
@ -155,53 +147,52 @@ class singleCleaner(StoppableThread):
|
|||
del state.discoveredPeers[k]
|
||||
except KeyError:
|
||||
pass
|
||||
# ..TODO: cleanup pending upload / download
|
||||
|
||||
# ..todo:: cleanup pending upload / download
|
||||
|
||||
gc.collect()
|
||||
|
||||
if state.shutdown == 0:
|
||||
self.stop.wait(singleCleaner.cycleLength)
|
||||
|
||||
def resendPubkeyRequest(self, address):
|
||||
"""Resend pubkey request for address"""
|
||||
self.logger.debug(
|
||||
'It has been a long time and we haven\'t heard a response to our'
|
||||
' getpubkey request. Sending again.'
|
||||
)
|
||||
try:
|
||||
# We need to take this entry out of the neededPubkeys structure
|
||||
# because the queues.workerQueue checks to see whether the entry
|
||||
# is already present and will not do the POW and send the message
|
||||
# because it assumes that it has already done it recently.
|
||||
del state.neededPubkeys[address]
|
||||
except:
|
||||
pass
|
||||
|
||||
def resendPubkeyRequest(address):
|
||||
"""After a long time, method send getpubkey request"""
|
||||
logger.debug(
|
||||
'It has been a long time and we haven\'t heard a response to our'
|
||||
' getpubkey request. Sending again.'
|
||||
)
|
||||
try:
|
||||
# We need to take this entry out of the neededPubkeys structure
|
||||
# because the queues.workerQueue checks to see whether the entry
|
||||
# is already present and will not do the POW and send the message
|
||||
# because it assumes that it has already done it recently.
|
||||
del state.neededPubkeys[address]
|
||||
except:
|
||||
pass
|
||||
queues.UISignalQueue.put((
|
||||
'updateStatusBar',
|
||||
'Doing work necessary to again attempt to request a public key...'
|
||||
))
|
||||
sqlExecute(
|
||||
'''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
|
||||
address)
|
||||
queues.workerQueue.put(('sendmessage', ''))
|
||||
|
||||
queues.UISignalQueue.put((
|
||||
'updateStatusBar',
|
||||
'Doing work necessary to again attempt to request a public key...'
|
||||
))
|
||||
sqlExecute(
|
||||
'''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
|
||||
address)
|
||||
queues.workerQueue.put(('sendmessage', ''))
|
||||
|
||||
|
||||
def resendMsg(ackdata):
|
||||
"""After a long time, method send acknowledgement msg"""
|
||||
logger.debug(
|
||||
'It has been a long time and we haven\'t heard an acknowledgement'
|
||||
' to our msg. Sending again.'
|
||||
)
|
||||
sqlExecute(
|
||||
'''UPDATE sent SET status='msgqueued' WHERE ackdata=?''',
|
||||
ackdata)
|
||||
queues.workerQueue.put(('sendmessage', ''))
|
||||
queues.UISignalQueue.put((
|
||||
'updateStatusBar',
|
||||
'Doing work necessary to again attempt to deliver a message...'
|
||||
))
|
||||
def resendMsg(self, ackdata):
|
||||
"""Resend message by ackdata"""
|
||||
self.logger.debug(
|
||||
'It has been a long time and we haven\'t heard an acknowledgement'
|
||||
' to our msg. Sending again.'
|
||||
)
|
||||
sqlExecute(
|
||||
'''UPDATE sent SET status='msgqueued' WHERE ackdata=?''',
|
||||
ackdata)
|
||||
queues.workerQueue.put(('sendmessage', ''))
|
||||
queues.UISignalQueue.put((
|
||||
'updateStatusBar',
|
||||
'Doing work necessary to again attempt to deliver a message...'
|
||||
))
|
||||
|
||||
|
||||
def deleteTrashMsgPermonantly():
|
||||
|
|
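The two resend helpers above follow the same recipe: forget any state that would make the worker think the job was already done, flip the affected rows in the sent table back to 'msgqueued', and nudge queues.workerQueue. A compressed sketch of that flow, using only helpers the thread already imports (state, queues, helper_sql.sqlExecute); it is an illustration, not a drop-in replacement for the class methods:

    import queues
    import state
    from helper_sql import sqlExecute

    def resend_getpubkey(address):
        # Forget that a getpubkey request is pending, otherwise the worker
        # skips the PoW because it assumes it was done recently.
        state.neededPubkeys.pop(address, None)
        sqlExecute(
            "UPDATE sent SET status='msgqueued' WHERE toaddress=?", address)
        queues.workerQueue.put(('sendmessage', ''))

    def resend_msg(ackdata):
        # Re-queue a single unacknowledged message, identified by its ackdata.
        sqlExecute(
            "UPDATE sent SET status='msgqueued' WHERE ackdata=?", ackdata)
        queues.workerQueue.put(('sendmessage', ''))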
|
@ -1,6 +1,5 @@
|
|||
"""
|
||||
src/class_singleWorker.py
|
||||
=========================
|
||||
Thread for performing PoW
|
||||
"""
|
||||
# pylint: disable=protected-access,too-many-branches,too-many-statements
|
||||
# pylint: disable=no-self-use,too-many-lines,too-many-locals,relative-import
|
||||
|
@ -26,10 +25,9 @@ import state
|
|||
import tr
|
||||
from addresses import calculateInventoryHash, decodeAddress, decodeVarint, encodeVarint
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_sql import sqlExecute, sqlQuery
|
||||
from helper_threading import StoppableThread
|
||||
from inventory import Inventory
|
||||
from network import StoppableThread
|
||||
|
||||
# This thread, of which there is only one, does the heavy lifting:
|
||||
# calculating POWs.
|
||||
|
@ -101,7 +99,7 @@ class singleWorker(StoppableThread):
|
|||
'''SELECT ackdata FROM sent WHERE status = 'msgsent' ''')
|
||||
for row in queryreturn:
|
||||
ackdata, = row
|
||||
logger.info('Watching for ackdata %s', hexlify(ackdata))
|
||||
self.logger.info('Watching for ackdata %s', hexlify(ackdata))
|
||||
shared.ackdataForWhichImWatching[ackdata] = 0
|
||||
|
||||
# Fix legacy (headerless) watched ackdata to include header
|
||||
|
@ -176,14 +174,14 @@ class singleWorker(StoppableThread):
|
|||
self.busy = 0
|
||||
return
|
||||
else:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Probable programming error: The command sent'
|
||||
' to the workerThread is weird. It is: %s\n',
|
||||
command
|
||||
)
|
||||
|
||||
queues.workerQueue.task_done()
|
||||
logger.info("Quitting...")
|
||||
self.logger.info("Quitting...")
|
||||
|
||||
def _getKeysForAddress(self, address):
|
||||
privSigningKeyBase58 = BMConfigParser().get(
|
||||
|
@ -220,25 +218,24 @@ class singleWorker(StoppableThread):
|
|||
)) / (2 ** 16))
|
||||
))
|
||||
initialHash = hashlib.sha512(payload).digest()
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'%s Doing proof of work... TTL set to %s', log_prefix, TTL)
|
||||
if log_time:
|
||||
start_time = time.time()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'%s Found proof of work %s Nonce: %s',
|
||||
log_prefix, trialValue, nonce
|
||||
)
|
||||
try:
|
||||
delta = time.time() - start_time
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'PoW took %.1f seconds, speed %s.',
|
||||
delta, sizeof_fmt(nonce / delta)
|
||||
)
|
||||
except: # NameError
|
||||
pass
|
||||
payload = pack('>Q', nonce) + payload
|
||||
# inventoryHash = calculateInventoryHash(payload)
|
||||
return payload
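The proofofwork.run() call above hides the actual search: Bitmessage PoW keeps double-SHA-512 hashing pack('>Q', nonce) + initialHash until the first eight bytes of the digest, read as a big-endian integer, fall at or below the target computed a few lines earlier. A slow pure-Python sketch of both pieces (the real module dispatches to C or OpenCL backends where available; the helper names here are illustrative):

    import hashlib
    from struct import pack, unpack

    def pow_target(payload_len, ttl, trials_per_byte=1000, extra_bytes=1000):
        # Same shape as the target computed above; the defaults mirror
        # networkDefaultProofOfWorkNonceTrialsPerByte / PayloadLengthExtraBytes.
        effective_len = payload_len + 8 + extra_bytes
        return 2 ** 64 // (
            trials_per_byte
            * (effective_len + (ttl * effective_len) // (2 ** 16)))

    def run_pow(target, initial_hash):
        # Equivalent of proofofwork.run(target, initialHash): brute-force
        # the nonce, return (trialValue, nonce) once the trial value fits.
        nonce = 0
        while True:
            digest = hashlib.sha512(hashlib.sha512(
                pack('>Q', nonce) + initial_hash).digest()).digest()
            trial_value = unpack('>Q', digest[:8])[0]
            if trial_value <= target:
                return trial_value, nonce
            nonce += 1

The winning nonce is then prepended to the payload with pack('>Q', nonce), exactly as the last line above does.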
|
||||
|
||||
def doPOWForMyV2Pubkey(self, adressHash):
|
||||
|
@ -263,7 +260,7 @@ class singleWorker(StoppableThread):
|
|||
_, _, pubSigningKey, pubEncryptionKey = \
|
||||
self._getKeysForAddress(myAddress)
|
||||
except Exception as err:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error within doPOWForMyV2Pubkey. Could not read'
|
||||
' the keys from the keys.dat file for a requested'
|
||||
' address. %s\n', err
|
||||
|
@ -281,7 +278,8 @@ class singleWorker(StoppableThread):
|
|||
Inventory()[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime, '')
|
||||
|
||||
logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
self.logger.info(
|
||||
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
|
||||
queues.invQueue.put((streamNumber, inventoryHash))
|
||||
queues.UISignalQueue.put(('updateStatusBar', ''))
|
||||
|
@ -306,7 +304,7 @@ class singleWorker(StoppableThread):
|
|||
# The address has been deleted.
|
||||
return
|
||||
if BMConfigParser().safeGetBoolean(myAddress, 'chan'):
|
||||
logger.info('This is a chan address. Not sending pubkey.')
|
||||
self.logger.info('This is a chan address. Not sending pubkey.')
|
||||
return
|
||||
_, addressVersionNumber, streamNumber, adressHash = decodeAddress(
|
||||
myAddress)
|
||||
|
@ -336,7 +334,7 @@ class singleWorker(StoppableThread):
|
|||
privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \
|
||||
self._getKeysForAddress(myAddress)
|
||||
except Exception as err:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error within sendOutOrStoreMyV3Pubkey. Could not read'
|
||||
' the keys from the keys.dat file for a requested'
|
||||
' address. %s\n', err
|
||||
|
@ -363,7 +361,8 @@ class singleWorker(StoppableThread):
|
|||
Inventory()[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime, '')
|
||||
|
||||
logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
self.logger.info(
|
||||
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
|
||||
queues.invQueue.put((streamNumber, inventoryHash))
|
||||
queues.UISignalQueue.put(('updateStatusBar', ''))
|
||||
|
@ -386,7 +385,7 @@ class singleWorker(StoppableThread):
|
|||
# The address has been deleted.
|
||||
return
|
||||
if shared.BMConfigParser().safeGetBoolean(myAddress, 'chan'):
|
||||
logger.info('This is a chan address. Not sending pubkey.')
|
||||
self.logger.info('This is a chan address. Not sending pubkey.')
|
||||
return
|
||||
_, addressVersionNumber, streamNumber, addressHash = decodeAddress(
|
||||
myAddress)
|
||||
|
@ -405,7 +404,7 @@ class singleWorker(StoppableThread):
|
|||
privSigningKeyHex, _, pubSigningKey, pubEncryptionKey = \
|
||||
self._getKeysForAddress(myAddress)
|
||||
except Exception as err:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error within sendOutOrStoreMyV4Pubkey. Could not read'
|
||||
' the keys from the keys.dat file for a requested'
|
||||
' address. %s\n', err
|
||||
|
@ -453,7 +452,8 @@ class singleWorker(StoppableThread):
|
|||
doubleHashOfAddressData[32:]
|
||||
)
|
||||
|
||||
logger.info('broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
self.logger.info(
|
||||
'broadcasting inv with hash: %s', hexlify(inventoryHash))
|
||||
|
||||
queues.invQueue.put((streamNumber, inventoryHash))
|
||||
queues.UISignalQueue.put(('updateStatusBar', ''))
|
||||
|
@ -462,7 +462,7 @@ class singleWorker(StoppableThread):
|
|||
myAddress, 'lastpubkeysendtime', str(int(time.time())))
|
||||
BMConfigParser().save()
|
||||
except Exception as err:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error: Couldn\'t add the lastpubkeysendtime'
|
||||
' to the keys.dat file. Error message: %s', err
|
||||
)
|
||||
|
@ -470,8 +470,8 @@ class singleWorker(StoppableThread):
|
|||
def sendOnionPeerObj(self, peer=None):
|
||||
"""Send onionpeer object representing peer"""
|
||||
if not peer: # find own onionhostname
|
||||
for peer in state.ownAddresses: # pylint: disable=redefined-argument-from-local
|
||||
if peer.host.endswith('.onion'):
|
||||
for peer_ in state.ownAddresses:
|
||||
if peer_.host.endswith('.onion'):
|
||||
break
|
||||
else:
|
||||
return
|
||||
|
@ -500,7 +500,7 @@ class singleWorker(StoppableThread):
|
|||
objectType, streamNumber, buffer(payload),
|
||||
embeddedTime, buffer(tag)
|
||||
)
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'sending inv (within sendOnionPeerObj function) for object: %s',
|
||||
hexlify(inventoryHash))
|
||||
queues.invQueue.put((streamNumber, inventoryHash))
|
||||
|
@ -523,7 +523,7 @@ class singleWorker(StoppableThread):
|
|||
_, addressVersionNumber, streamNumber, ripe = \
|
||||
decodeAddress(fromaddress)
|
||||
if addressVersionNumber <= 1:
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error: In the singleWorker thread, the '
|
||||
' sendBroadcast function doesn\'t understand'
|
||||
' the address version.\n')
|
||||
|
@ -639,7 +639,7 @@ class singleWorker(StoppableThread):
|
|||
# to not let the user try to send a message this large
|
||||
# until we implement message continuation.
|
||||
if len(payload) > 2 ** 18: # 256 KiB
|
||||
logger.critical(
|
||||
self.logger.critical(
|
||||
'This broadcast object is too large to send.'
|
||||
' This should never happen. Object size: %s',
|
||||
len(payload)
|
||||
|
@ -650,7 +650,7 @@ class singleWorker(StoppableThread):
|
|||
objectType = 3
|
||||
Inventory()[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime, tag)
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'sending inv (within sendBroadcast function)'
|
||||
' for object: %s',
|
||||
hexlify(inventoryHash)
|
||||
|
@ -870,8 +870,8 @@ class singleWorker(StoppableThread):
|
|||
"MainWindow",
|
||||
"Looking up the receiver\'s public key"))
|
||||
))
|
||||
logger.info('Sending a message.')
|
||||
logger.debug(
|
||||
self.logger.info('Sending a message.')
|
||||
self.logger.debug(
|
||||
'First 150 characters of message: %s',
|
||||
repr(message[:150])
|
||||
)
|
||||
|
@ -915,7 +915,7 @@ class singleWorker(StoppableThread):
|
|||
if not shared.BMConfigParser().safeGetBoolean(
|
||||
'bitmessagesettings', 'willinglysendtomobile'
|
||||
):
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'The receiver is a mobile user but the'
|
||||
' sender (you) has not selected that you'
|
||||
' are willing to send to mobiles. Aborting'
|
||||
|
@ -981,7 +981,7 @@ class singleWorker(StoppableThread):
|
|||
defaults.networkDefaultPayloadLengthExtraBytes:
|
||||
requiredPayloadLengthExtraBytes = \
|
||||
defaults.networkDefaultPayloadLengthExtraBytes
|
||||
logger.debug(
|
||||
self.logger.debug(
|
||||
'Using averageProofOfWorkNonceTrialsPerByte: %s'
|
||||
' and payloadLengthExtraBytes: %s.',
|
||||
requiredAverageProofOfWorkNonceTrialsPerByte,
|
||||
|
@ -1046,8 +1046,9 @@ class singleWorker(StoppableThread):
|
|||
l10n.formatTimestamp()))))
|
||||
continue
|
||||
else: # if we are sending a message to ourselves or a chan..
|
||||
logger.info('Sending a message.')
|
||||
logger.debug('First 150 characters of message: %r', message[:150])
|
||||
self.logger.info('Sending a message.')
|
||||
self.logger.debug(
|
||||
'First 150 characters of message: %r', message[:150])
|
||||
behaviorBitfield = protocol.getBitfield(fromaddress)
|
||||
|
||||
try:
|
||||
|
@ -1066,7 +1067,7 @@ class singleWorker(StoppableThread):
|
|||
" message. %1"
|
||||
).arg(l10n.formatTimestamp()))
|
||||
))
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Error within sendMsg. Could not read the keys'
|
||||
' from the keys.dat file for our own address. %s\n',
|
||||
err)
|
||||
|
@ -1142,14 +1143,14 @@ class singleWorker(StoppableThread):
|
|||
payload += encodeVarint(encodedMessage.length)
|
||||
payload += encodedMessage.data
|
||||
if BMConfigParser().has_section(toaddress):
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Not bothering to include ackdata because we are'
|
||||
' sending to ourselves or a chan.'
|
||||
)
|
||||
fullAckPayload = ''
|
||||
elif not protocol.checkBitfield(
|
||||
behaviorBitfield, protocol.BITFIELD_DOESACK):
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Not bothering to include ackdata because'
|
||||
' the receiver said that they won\'t relay it anyway.'
|
||||
)
|
||||
|
@ -1202,7 +1203,7 @@ class singleWorker(StoppableThread):
|
|||
requiredPayloadLengthExtraBytes
|
||||
)) / (2 ** 16))
|
||||
))
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'(For msg message) Doing proof of work. Total required'
|
||||
' difficulty: %f. Required small message difficulty: %f.',
|
||||
float(requiredAverageProofOfWorkNonceTrialsPerByte) /
|
||||
|
@ -1215,12 +1216,12 @@ class singleWorker(StoppableThread):
|
|||
initialHash = hashlib.sha512(encryptedPayload).digest()
|
||||
trialValue, nonce = proofofwork.run(target, initialHash)
|
||||
print("nonce calculated value#############################", nonce)
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'(For msg message) Found proof of work %s Nonce: %s',
|
||||
trialValue, nonce
|
||||
)
|
||||
try:
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'PoW took %.1f seconds, speed %s.',
|
||||
time.time() - powStartTime,
|
||||
sizeof_fmt(nonce / (time.time() - powStartTime))
|
||||
|
@ -1235,7 +1236,7 @@ class singleWorker(StoppableThread):
|
|||
# in the code to not let the user try to send a message
|
||||
# this large until we implement message continuation.
|
||||
if len(encryptedPayload) > 2 ** 18: # 256 KiB
|
||||
logger.critical(
|
||||
self.logger.critical(
|
||||
'This msg object is too large to send. This should'
|
||||
' never happen. Object size: %i',
|
||||
len(encryptedPayload)
|
||||
|
@ -1266,7 +1267,7 @@ class singleWorker(StoppableThread):
|
|||
" Sent on %1"
|
||||
).arg(l10n.formatTimestamp()))
|
||||
))
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'Broadcasting inv for my msg(within sendmsg function): %s',
|
||||
hexlify(inventoryHash)
|
||||
)
|
||||
|
@ -1319,7 +1320,7 @@ class singleWorker(StoppableThread):
|
|||
toStatus, addressVersionNumber, streamNumber, ripe = decodeAddress(
|
||||
toAddress)
|
||||
if toStatus != 'success':
|
||||
logger.error(
|
||||
self.logger.error(
|
||||
'Very abnormal error occurred in requestPubKey.'
|
||||
' toAddress is: %r. Please report this error to Atheros.',
|
||||
toAddress
|
||||
|
@ -1333,7 +1334,7 @@ class singleWorker(StoppableThread):
|
|||
toAddress
|
||||
)
|
||||
if not queryReturn:
|
||||
logger.critical(
|
||||
self.logger.critical(
|
||||
'BUG: Why are we requesting the pubkey for %s'
|
||||
' if there are no messages in the sent folder'
|
||||
' to that address?', toAddress
|
||||
|
@ -1381,11 +1382,11 @@ class singleWorker(StoppableThread):
|
|||
payload += encodeVarint(streamNumber)
|
||||
if addressVersionNumber <= 3:
|
||||
payload += ripe
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'making request for pubkey with ripe: %s', hexlify(ripe))
|
||||
else:
|
||||
payload += tag
|
||||
logger.info(
|
||||
self.logger.info(
|
||||
'making request for v4 pubkey with tag: %s', hexlify(tag))
|
||||
|
||||
# print 'trial value', trialValue
|
||||
|
@ -1406,7 +1407,7 @@ class singleWorker(StoppableThread):
|
|||
objectType = 1
|
||||
Inventory()[inventoryHash] = (
|
||||
objectType, streamNumber, payload, embeddedTime, '')
|
||||
logger.info('sending inv (for the getpubkey message)')
|
||||
self.logger.info('sending inv (for the getpubkey message)')
|
||||
queues.invQueue.put((streamNumber, inventoryHash))
|
||||
|
||||
# wait 10% past expiration
|
||||
|
|
|
@ -5,7 +5,6 @@ src/class_smtpDeliver.py
|
|||
# pylint: disable=unused-variable
|
||||
|
||||
import smtplib
|
||||
import sys
|
||||
import urlparse
|
||||
from email.header import Header
|
||||
from email.mime.text import MIMEText
|
||||
|
@ -13,8 +12,7 @@ from email.mime.text import MIMEText
|
|||
import queues
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from network.threads import StoppableThread
|
||||
|
||||
SMTPDOMAIN = "bmaddr.lan"
|
||||
|
||||
|
@ -75,10 +73,12 @@ class smtpDeliver(StoppableThread):
|
|||
client.starttls()
|
||||
client.ehlo()
|
||||
client.sendmail(msg['From'], [to], msg.as_string())
|
||||
logger.info("Delivered via SMTP to %s through %s:%i ...", to, u.hostname, u.port)
|
||||
self.logger.info(
|
||||
'Delivered via SMTP to %s through %s:%i ...',
|
||||
to, u.hostname, u.port)
|
||||
client.quit()
|
||||
except:
|
||||
logger.error("smtp delivery error", exc_info=True)
|
||||
self.logger.error('smtp delivery error', exc_info=True)
|
||||
elif command == 'displayNewSentMessage':
|
||||
toAddress, fromLabel, fromAddress, subject, message, ackdata = data
|
||||
elif command == 'updateNetworkStatusTab':
|
||||
|
@ -112,5 +112,5 @@ class smtpDeliver(StoppableThread):
|
|||
elif command == 'stopThread':
|
||||
break
|
||||
else:
|
||||
sys.stderr.write(
|
||||
'Command sent to smtpDeliver not recognized: %s\n' % command)
|
||||
self.logger.warning(
|
||||
'Command sent to smtpDeliver not recognized: %s', command)
|
||||
|
|
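Taken together, the smtpDeliver path above amounts to wrapping the bitmessage in a MIMEText envelope and handing it to smtplib at whatever SMTP endpoint the user configured. A standalone sketch of that delivery step; the smtp:// URL shape and the function name are assumptions, only the smtplib calls visible above are taken from the thread itself:

    import smtplib
    import urlparse  # Python 2, as in the module above
    from email.header import Header
    from email.mime.text import MIMEText

    def deliver_via_smtp(url, sender, to, subject, body):
        u = urlparse.urlparse(url)          # e.g. smtp://host:port (assumed form)
        msg = MIMEText(body, 'plain', 'utf-8')
        msg['Subject'] = Header(subject, 'utf-8')
        msg['From'] = sender
        msg['To'] = to
        client = smtplib.SMTP(u.hostname, u.port)
        client.starttls()                   # same TLS upgrade the thread performs
        client.ehlo()
        client.sendmail(msg['From'], [to], msg.as_string())
        client.quit()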
|
@ -1,28 +1,37 @@
|
|||
"""
|
||||
SMTP server thread
|
||||
"""
|
||||
import asyncore
|
||||
import base64
|
||||
import email
|
||||
from email.parser import Parser
|
||||
from email.header import decode_header
|
||||
import logging
|
||||
import re
|
||||
import signal
|
||||
import smtpd
|
||||
import threading
|
||||
import time
|
||||
from email.header import decode_header
|
||||
from email.parser import Parser
|
||||
|
||||
import queues
|
||||
from addresses import decodeAddress
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_sql import sqlExecute
|
||||
from helper_ackPayload import genAckPayload
|
||||
from helper_threading import StoppableThread
|
||||
import queues
|
||||
from helper_sql import sqlExecute
|
||||
from network.threads import StoppableThread
|
||||
from version import softwareVersion
|
||||
|
||||
SMTPDOMAIN = "bmaddr.lan"
|
||||
LISTENPORT = 8425
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
|
||||
|
||||
class smtpServerChannel(smtpd.SMTPChannel):
|
||||
"""Asyncore channel for SMTP protocol (server)"""
|
||||
def smtp_EHLO(self, arg):
|
||||
"""Process an EHLO"""
|
||||
if not arg:
|
||||
self.push('501 Syntax: HELO hostname')
|
||||
return
|
||||
|
@ -30,15 +39,17 @@ class smtpServerChannel(smtpd.SMTPChannel):
|
|||
self.push('250 AUTH PLAIN')
|
||||
|
||||
def smtp_AUTH(self, arg):
|
||||
"""Process AUTH"""
|
||||
if not arg or arg[0:5] not in ["PLAIN"]:
|
||||
self.push('501 Syntax: AUTH PLAIN')
|
||||
return
|
||||
authstring = arg[6:]
|
||||
try:
|
||||
decoded = base64.b64decode(authstring)
|
||||
correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \
|
||||
"\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "")
|
||||
logger.debug("authstring: %s / %s", correctauth, decoded)
|
||||
correctauth = "\x00" + BMConfigParser().safeGet(
|
||||
"bitmessagesettings", "smtpdusername", "") + "\x00" + BMConfigParser().safeGet(
|
||||
"bitmessagesettings", "smtpdpassword", "")
|
||||
logger.debug('authstring: %s / %s', correctauth, decoded)
|
||||
if correctauth == decoded:
|
||||
self.auth = True
|
||||
self.push('235 2.7.0 Authentication successful')
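What smtp_AUTH() checks is plain SASL PLAIN: the client sends base64("\0username\0password") after the PLAIN keyword, and the channel rebuilds the same string from the smtpdusername / smtpdpassword settings and compares. A client-side illustration of a matching credential (the username and password are made up):

    import base64

    def auth_plain_token(username, password):
        # NUL + authcid + NUL + password, base64-encoded, which is exactly
        # what the channel decodes from arg[6:] and compares above.
        return base64.b64encode('\x00' + username + '\x00' + password)

    # Sent on the wire as:  AUTH PLAIN <token>
    print(auth_plain_token('me', 'secret'))  # prints AG1lAHNlY3JldA==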
|
||||
|
@ -48,22 +59,26 @@ class smtpServerChannel(smtpd.SMTPChannel):
|
|||
self.push('501 Authentication fail')
|
||||
|
||||
def smtp_DATA(self, arg):
|
||||
"""Process DATA"""
|
||||
if not hasattr(self, "auth") or not self.auth:
|
||||
self.push ("530 Authentication required")
|
||||
self.push('530 Authentication required')
|
||||
return
|
||||
smtpd.SMTPChannel.smtp_DATA(self, arg)
|
||||
|
||||
|
||||
class smtpServerPyBitmessage(smtpd.SMTPServer):
|
||||
"""Asyncore SMTP server class"""
|
||||
def handle_accept(self):
|
||||
"""Accept a connection"""
|
||||
pair = self.accept()
|
||||
if pair is not None:
|
||||
conn, addr = pair
|
||||
# print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
|
||||
self.channel = smtpServerChannel(self, conn, addr)
|
||||
|
||||
def send(self, fromAddress, toAddress, subject, message):
|
||||
status, addressVersionNumber, streamNumber, ripe = decodeAddress(toAddress)
|
||||
def send(self, fromAddress, toAddress, subject, message): # pylint: disable=arguments-differ
|
||||
"""Send a bitmessage"""
|
||||
streamNumber, ripe = decodeAddress(toAddress)[2:]
|
||||
stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel')
|
||||
ackdata = genAckPayload(streamNumber, stealthLevel)
|
||||
sqlExecute(
|
||||
|
@ -75,19 +90,21 @@ class smtpServerPyBitmessage(smtpd.SMTPServer):
|
|||
subject,
|
||||
message,
|
||||
ackdata,
|
||||
int(time.time()), # sentTime (this will never change)
|
||||
int(time.time()), # lastActionTime
|
||||
0, # sleepTill time. This will get set when the POW gets done.
|
||||
int(time.time()), # sentTime (this will never change)
|
||||
int(time.time()), # lastActionTime
|
||||
0, # sleepTill time. This will get set when the POW gets done.
|
||||
'msgqueued',
|
||||
0, # retryNumber
|
||||
'sent', # folder
|
||||
2, # encodingtype
|
||||
min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2) # not necessary to have a TTL higher than 2 days
|
||||
0, # retryNumber
|
||||
'sent', # folder
|
||||
2, # encodingtype
|
||||
# not necessary to have a TTL higher than 2 days
|
||||
min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2)
|
||||
)
|
||||
|
||||
queues.workerQueue.put(('sendmessage', toAddress))
|
||||
|
||||
def decode_header(self, hdr):
|
||||
"""Email header decoding"""
|
||||
ret = []
|
||||
for h in decode_header(self.msg_headers[hdr]):
|
||||
if h[1]:
|
||||
|
@ -97,37 +114,37 @@ class smtpServerPyBitmessage(smtpd.SMTPServer):
|
|||
|
||||
return ret
|
||||
|
||||
|
||||
def process_message(self, peer, mailfrom, rcpttos, data):
|
||||
# print 'Receiving message from:', peer
|
||||
def process_message(self, peer, mailfrom, rcpttos, data): # pylint: disable=too-many-locals, too-many-branches
|
||||
"""Process an email"""
|
||||
# print 'Receiving message from:', peer
|
||||
p = re.compile(".*<([^>]+)>")
|
||||
if not hasattr(self.channel, "auth") or not self.channel.auth:
|
||||
logger.error("Missing or invalid auth")
|
||||
logger.error('Missing or invalid auth')
|
||||
return
|
||||
try:
|
||||
self.msg_headers = Parser().parsestr(data)
|
||||
except:
|
||||
logger.error("Invalid headers")
|
||||
logger.error('Invalid headers')
|
||||
return
|
||||
|
||||
try:
|
||||
sender, domain = p.sub(r'\1', mailfrom).split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
raise Exception("Bad domain %s" % domain)
|
||||
if sender not in BMConfigParser().addresses():
|
||||
raise Exception("Nonexisting user %s", sender)
|
||||
raise Exception("Nonexisting user %s" % sender)
|
||||
except Exception as err:
|
||||
logger.debug("Bad envelope from %s: %s", mailfrom, repr(err))
|
||||
logger.debug('Bad envelope from %s: %r', mailfrom, err)
|
||||
msg_from = self.decode_header("from")
|
||||
try:
|
||||
msg_from = p.sub(r'\1', self.decode_header("from")[0])
|
||||
sender, domain = msg_from.split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
raise Exception("Bad domain %s" % domain)
|
||||
if sender not in BMConfigParser().addresses():
|
||||
raise Exception("Nonexisting user %s", sender)
|
||||
raise Exception("Nonexisting user %s" % sender)
|
||||
except Exception as err:
|
||||
logger.error("Bad headers from %s: %s", msg_from, repr(err))
|
||||
logger.error('Bad headers from %s: %r', msg_from, err)
|
||||
return
|
||||
|
||||
try:
|
||||
|
@ -145,18 +162,20 @@ class smtpServerPyBitmessage(smtpd.SMTPServer):
|
|||
try:
|
||||
rcpt, domain = p.sub(r'\1', to).split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject)
|
||||
raise Exception("Bad domain %s" % domain)
|
||||
logger.debug(
|
||||
'Sending %s to %s about %s', sender, rcpt, msg_subject)
|
||||
self.send(sender, rcpt, msg_subject, body)
|
||||
logger.info("Relayed %s to %s", sender, rcpt)
|
||||
logger.info('Relayed %s to %s', sender, rcpt)
|
||||
except Exception as err:
|
||||
logger.error( "Bad to %s: %s", to, repr(err))
|
||||
logger.error('Bad to %s: %r', to, err)
|
||||
continue
|
||||
return
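The p = re.compile(".*<([^>]+)>") pattern used throughout process_message just strips an RFC 2822 display name before the address is split on '@' and checked against SMTPDOMAIN. For example:

    import re
    p = re.compile(".*<([^>]+)>")
    # 'alice' is a made-up local part; the domain matches SMTPDOMAIN above
    print(p.sub(r'\1', 'Alice <alice@bmaddr.lan>'))             # alice@bmaddr.lan
    print(p.sub(r'\1', 'Alice <alice@bmaddr.lan>').split('@'))  # ['alice', 'bmaddr.lan']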
|
||||
|
||||
|
||||
class smtpServer(StoppableThread):
|
||||
def __init__(self, parent=None):
|
||||
"""SMTP server thread"""
|
||||
def __init__(self, _=None):
|
||||
super(smtpServer, self).__init__(name="smtpServerThread")
|
||||
self.server = smtpServerPyBitmessage(('127.0.0.1', LISTENPORT), None)
|
||||
|
||||
|
@ -168,21 +187,26 @@ class smtpServer(StoppableThread):
|
|||
def run(self):
|
||||
asyncore.loop(1)
|
||||
|
||||
def signals(signal, frame):
|
||||
print "Got signal, terminating"
|
||||
|
||||
def signals(_, __):
|
||||
"""Signal handler"""
|
||||
logger.warning('Got signal, terminating')
|
||||
for thread in threading.enumerate():
|
||||
if thread.isAlive() and isinstance(thread, StoppableThread):
|
||||
thread.stopThread()
|
||||
|
||||
|
||||
def runServer():
|
||||
print "Running SMTPd thread"
|
||||
"""Run SMTP server as a standalone python process"""
|
||||
logger.warning('Running SMTPd thread')
|
||||
smtpThread = smtpServer()
|
||||
smtpThread.start()
|
||||
signal.signal(signal.SIGINT, signals)
|
||||
signal.signal(signal.SIGTERM, signals)
|
||||
print "Processing"
|
||||
logger.warning('Processing')
|
||||
smtpThread.join()
|
||||
print "The end"
|
||||
logger.warning('The end')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
runServer()
|
||||
|
|
|
@ -1,29 +1,34 @@
|
|||
"""
|
||||
sqlThread is defined here
|
||||
"""
|
||||
|
||||
import threading
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
||||
import sqlite3
|
||||
import time
|
||||
import shutil # used for moving the messages.dat file
|
||||
import sys
|
||||
import os
|
||||
from debug import logger
|
||||
|
||||
import helper_sql
|
||||
import helper_startup
|
||||
import paths
|
||||
import queues
|
||||
import state
|
||||
import tr
|
||||
|
||||
# This thread exists because SQLITE3 is so un-threadsafe that we must
|
||||
# submit queries to it and it puts results back in a different queue. They
|
||||
# won't let us just use locks.
|
||||
# pylint: disable=attribute-defined-outside-init,protected-access
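The comment above is the whole design: callers never touch the sqlite3 connection, they hand statements to helper_sql.sqlSubmitQueue and this thread, the only owner of the connection, executes them and posts the rows back. A stripped-down sketch of that handshake; apart from sqlSubmitQueue, the queue names and the tuple layout here are illustrative, not the real helper_sql interface:

    import Queue      # Python 2 stdlib, as used by this codebase
    import sqlite3
    import threading

    submit_queue = Queue.Queue()   # stands in for helper_sql.sqlSubmitQueue
    return_queue = Queue.Queue()   # stands in for the reply queue

    def sql_thread(db_path):
        # One thread owns the connection, so sqlite3 never sees
        # cross-thread access and callers need no locks of their own.
        conn = sqlite3.connect(db_path)
        cur = conn.cursor()
        while True:
            statement, parameters = submit_queue.get()
            if statement == 'exit':
                conn.close()
                return
            cur.execute(statement, parameters)
            return_queue.put(cur.fetchall())

    def sql_query(statement, parameters=()):
        # Roughly what helper_sql.sqlQuery boils down to: submit, then block.
        submit_queue.put((statement, parameters))
        return return_queue.get()

    threading.Thread(target=sql_thread, args=(':memory:',)).start()
    print(sql_query('SELECT 1'))        # [(1,)]
    submit_queue.put(('exit', ()))      # shut the sketch down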
|
||||
|
||||
|
||||
class sqlThread(threading.Thread):
|
||||
"""A thread for all SQL operations"""
|
||||
|
||||
def __init__(self):
|
||||
threading.Thread.__init__(self, name="SQL")
|
||||
|
||||
def run(self):
|
||||
def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
|
||||
"""Process SQL queries from `.helper_sql.sqlSubmitQueue`"""
|
||||
self.conn = sqlite3.connect(state.appdata + 'messages.dat')
|
||||
self.conn.text_factory = str
|
||||
self.cur = self.conn.cursor()
|
||||
|
@ -32,30 +37,38 @@ class sqlThread(threading.Thread):
|
|||
|
||||
try:
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, encodingtype int, read bool, sighash blob, UNIQUE(msgid) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,'''
|
||||
''' received text, message text, folder text, encodingtype int, read bool, sighash blob,'''
|
||||
''' UNIQUE(msgid) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, senttime integer, lastactiontime integer, sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''' )
|
||||
'''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,'''
|
||||
''' message text, ackdata blob, senttime integer, lastactiontime integer,'''
|
||||
''' sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE subscriptions (label text, address text, enabled bool)''' )
|
||||
'''CREATE TABLE subscriptions (label text, address text, enabled bool)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE addressbook (label text, address text)''' )
|
||||
'''CREATE TABLE addressbook (label text, address text)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE blacklist (label text, address text, enabled bool)''' )
|
||||
'''CREATE TABLE blacklist (label text, address text, enabled bool)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE whitelist (label text, address text, enabled bool)''' )
|
||||
'''CREATE TABLE whitelist (label text, address text, enabled bool)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,'''
|
||||
''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,'''
|
||||
''' expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
|
||||
'''INSERT INTO subscriptions VALUES'''
|
||||
'''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' )
|
||||
self.cur.execute( '''INSERT INTO settings VALUES('version','10')''')
|
||||
self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
|
||||
'''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute('''INSERT INTO settings VALUES('version','10')''')
|
||||
self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
|
||||
int(time.time()),))
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE objectprocessorqueue'''
|
||||
''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
|
||||
self.conn.commit()
|
||||
logger.info('Created messages database file')
|
||||
except Exception as err:
|
||||
|
@ -120,33 +133,38 @@ class sqlThread(threading.Thread):
|
|||
logger.debug(
|
||||
"In messages.dat database, creating new 'settings' table.")
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''' )
|
||||
self.cur.execute( '''INSERT INTO settings VALUES('version','1')''')
|
||||
self.cur.execute( '''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
|
||||
'''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute('''INSERT INTO settings VALUES('version','1')''')
|
||||
self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (
|
||||
int(time.time()),))
|
||||
logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.')
|
||||
self.cur.execute(
|
||||
'''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''')
|
||||
'''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,'''
|
||||
''' usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''')
|
||||
self.cur.execute( '''DROP TABLE pubkeys''')
|
||||
self.cur.execute('''DROP TABLE pubkeys''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE pubkeys (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE pubkeys'''
|
||||
''' (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
|
||||
self.cur.execute( '''DROP TABLE pubkeys_backup;''')
|
||||
logger.debug('Deleting all pubkeys from inventory. They will be redownloaded and then saved with the correct times.')
|
||||
self.cur.execute('''DROP TABLE pubkeys_backup;''')
|
||||
logger.debug(
|
||||
'Deleting all pubkeys from inventory.'
|
||||
' They will be redownloaded and then saved with the correct times.')
|
||||
self.cur.execute(
|
||||
'''delete from inventory where objecttype = 'pubkey';''')
|
||||
logger.debug('replacing Bitmessage announcements mailing list with a new one.')
|
||||
self.cur.execute(
|
||||
'''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
|
||||
'''INSERT INTO subscriptions VALUES'''
|
||||
'''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
|
||||
logger.debug('Commiting.')
|
||||
self.conn.commit()
|
||||
logger.debug('Vacuuming message.dat. You might notice that the file size gets much smaller.')
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
self.cur.execute(''' VACUUM ''')
|
||||
|
||||
# After code refactoring, the possible status values for sent messages
|
||||
# have changed.
|
||||
|
@ -170,15 +188,21 @@ class sqlThread(threading.Thread):
|
|||
'In messages.dat database, removing an obsolete field from'
|
||||
' the inventory table.')
|
||||
self.cur.execute(
|
||||
'''CREATE TEMPORARY TABLE inventory_backup(hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''')
|
||||
'''CREATE TEMPORARY TABLE inventory_backup'''
|
||||
'''(hash blob, objecttype text, streamnumber int, payload blob,'''
|
||||
''' receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory;''')
|
||||
self.cur.execute( '''DROP TABLE inventory''')
|
||||
'''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime'''
|
||||
''' FROM inventory;''')
|
||||
self.cur.execute('''DROP TABLE inventory''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE inventory'''
|
||||
''' (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,'''
|
||||
''' UNIQUE(hash) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime FROM inventory_backup;''')
|
||||
self.cur.execute( '''DROP TABLE inventory_backup;''')
|
||||
'''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime'''
|
||||
''' FROM inventory_backup;''')
|
||||
self.cur.execute('''DROP TABLE inventory_backup;''')
|
||||
item = '''update settings set value=? WHERE key='version';'''
|
||||
parameters = (3,)
|
||||
self.cur.execute(item, parameters)
|
||||
|
@ -208,7 +232,8 @@ class sqlThread(threading.Thread):
|
|||
if currentVersion == 4:
|
||||
self.cur.execute('''DROP TABLE pubkeys''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''')
|
||||
'''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,'''
|
||||
'''usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''delete from inventory where objecttype = 'pubkey';''')
|
||||
item = '''update settings set value=? WHERE key='version';'''
|
||||
|
@ -224,7 +249,8 @@ class sqlThread(threading.Thread):
|
|||
if currentVersion == 5:
|
||||
self.cur.execute('''DROP TABLE knownnodes''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE objectprocessorqueue (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
|
||||
'''CREATE TABLE objectprocessorqueue'''
|
||||
''' (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
|
||||
item = '''update settings set value=? WHERE key='version';'''
|
||||
parameters = (6,)
|
||||
self.cur.execute(item, parameters)
|
||||
|
@ -240,10 +266,15 @@ class sqlThread(threading.Thread):
|
|||
logger.debug(
|
||||
'In messages.dat database, dropping and recreating'
|
||||
' the inventory table.')
|
||||
self.cur.execute( '''DROP TABLE inventory''')
|
||||
self.cur.execute( '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''' )
|
||||
self.cur.execute( '''DROP TABLE objectprocessorqueue''')
|
||||
self.cur.execute( '''CREATE TABLE objectprocessorqueue (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''' )
|
||||
self.cur.execute('''DROP TABLE inventory''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE inventory'''
|
||||
''' (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,'''
|
||||
''' tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute('''DROP TABLE objectprocessorqueue''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE objectprocessorqueue'''
|
||||
''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''')
|
||||
item = '''update settings set value=? WHERE key='version';'''
|
||||
parameters = (7,)
|
||||
self.cur.execute(item, parameters)
|
||||
|
@ -305,15 +336,24 @@ class sqlThread(threading.Thread):
|
|||
' fields into the retrynumber field and adding the'
|
||||
' sleeptill and ttl fields...')
|
||||
self.cur.execute(
|
||||
'''CREATE TEMPORARY TABLE sent_backup (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, retrynumber integer, folder text, encodingtype int)''' )
|
||||
'''CREATE TEMPORARY TABLE sent_backup'''
|
||||
''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,'''
|
||||
''' ackdata blob, lastactiontime integer, status text, retrynumber integer,'''
|
||||
''' folder text, encodingtype int)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, 0, folder, encodingtype FROM sent;''')
|
||||
self.cur.execute( '''DROP TABLE sent''')
|
||||
'''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,'''
|
||||
''' subject, message, ackdata, lastactiontime,'''
|
||||
''' status, 0, folder, encodingtype FROM sent;''')
|
||||
self.cur.execute('''DROP TABLE sent''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text, retrynumber integer, folder text, encodingtype int, ttl int)''' )
|
||||
'''CREATE TABLE sent'''
|
||||
''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,'''
|
||||
''' ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,'''
|
||||
''' retrynumber integer, folder text, encodingtype int, ttl int)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''')
|
||||
self.cur.execute( '''DROP TABLE sent_backup''')
|
||||
'''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,'''
|
||||
''' lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''')
|
||||
self.cur.execute('''DROP TABLE sent_backup''')
|
||||
logger.info('In messages.dat database, finished making TTL-related changes.')
|
||||
logger.debug('In messages.dat database, adding address field to the pubkeys table.')
|
||||
# We're going to have to calculate the address for each row in the pubkeys
|
||||
|
@ -330,16 +370,24 @@ class sqlThread(threading.Thread):
|
|||
self.cur.execute(item, parameters)
|
||||
# Now we can remove the hash field from the pubkeys table.
|
||||
self.cur.execute(
|
||||
'''CREATE TEMPORARY TABLE pubkeys_backup (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TEMPORARY TABLE pubkeys_backup'''
|
||||
''' (address text, addressversion int, transmitdata blob, time int,'''
|
||||
''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO pubkeys_backup SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''')
|
||||
self.cur.execute( '''DROP TABLE pubkeys''')
|
||||
'''INSERT INTO pubkeys_backup'''
|
||||
''' SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''')
|
||||
self.cur.execute('''DROP TABLE pubkeys''')
|
||||
self.cur.execute(
|
||||
'''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int, usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''' )
|
||||
'''CREATE TABLE pubkeys'''
|
||||
''' (address text, addressversion int, transmitdata blob, time int, usedpersonally text,'''
|
||||
''' UNIQUE(address) ON CONFLICT REPLACE)''')
|
||||
self.cur.execute(
|
||||
'''INSERT INTO pubkeys SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
|
||||
self.cur.execute( '''DROP TABLE pubkeys_backup''')
|
||||
logger.debug('In messages.dat database, done adding address field to the pubkeys table and removing the hash field.')
|
||||
'''INSERT INTO pubkeys SELECT'''
|
||||
''' address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
|
||||
self.cur.execute('''DROP TABLE pubkeys_backup''')
|
||||
logger.debug(
|
||||
'In messages.dat database, done adding address field to the pubkeys table'
|
||||
' and removing the hash field.')
|
||||
self.cur.execute('''update settings set value=10 WHERE key='version';''')
|
||||
|
||||
# Are you hoping to add a new option to the keys.dat file of existing
|
||||
|
@ -349,7 +397,7 @@ class sqlThread(threading.Thread):
|
|||
try:
|
||||
testpayload = '\x00\x00'
|
||||
t = ('1234', 1, testpayload, '12345678', 'no')
|
||||
self.cur.execute( '''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t)
|
||||
self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t)
|
||||
self.conn.commit()
|
||||
self.cur.execute(
|
||||
'''SELECT transmitdata FROM pubkeys WHERE address='1234' ''')
|
||||
|
@ -359,13 +407,29 @@ class sqlThread(threading.Thread):
|
|||
self.cur.execute('''DELETE FROM pubkeys WHERE address='1234' ''')
|
||||
self.conn.commit()
|
||||
if transmitdata == '':
|
||||
logger.fatal('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
|
||||
logger.fatal('PyBitmessage will now exit very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
|
||||
logger.fatal(
|
||||
'Problem: The version of SQLite you have cannot store Null values.'
|
||||
' Please download and install the latest revision of your version of Python'
|
||||
' (for example, the latest Python 2.7 revision) and try again.\n')
|
||||
logger.fatal(
|
||||
'PyBitmessage will now exit very abruptly.'
|
||||
' You may now see threading errors related to this abrupt exit'
|
||||
' but the problem you need to solve is related to SQLite.\n\n')
|
||||
os._exit(0)
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(While null value test) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(While null value test) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
else:
|
||||
logger.error(err)
|
||||
|
@ -381,11 +445,21 @@ class sqlThread(threading.Thread):
|
|||
if int(value) < int(time.time()) - 86400:
|
||||
logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...')
|
||||
try:
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
self.cur.execute(''' VACUUM ''')
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(While VACUUM) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(While VACUUM) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
item = '''update settings set value=? WHERE key='lastvacuumtime';'''
|
||||
parameters = (int(time.time()),)
|
||||
|
@ -400,8 +474,18 @@ class sqlThread(threading.Thread):
|
|||
self.conn.commit()
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(While committing) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(While committing) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
elif item == 'exit':
|
||||
self.conn.close()
|
||||
|
@ -415,8 +499,18 @@ class sqlThread(threading.Thread):
|
|||
self.conn.commit()
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(while movemessagstoprog) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(while movemessagstoprog) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
self.conn.close()
|
||||
shutil.move(
|
||||
|
@ -431,8 +525,18 @@ class sqlThread(threading.Thread):
|
|||
self.conn.commit()
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(while movemessagstoappdata) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(while movemessagstoappdata) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
self.conn.close()
|
||||
shutil.move(
|
||||
|
@ -445,11 +549,21 @@ class sqlThread(threading.Thread):
|
|||
self.cur.execute('''delete from sent where folder='trash' ''')
|
||||
self.conn.commit()
|
||||
try:
|
||||
self.cur.execute( ''' VACUUM ''')
|
||||
self.cur.execute(''' VACUUM ''')
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(while deleteandvacuume) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(while deleteandvacuume) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
else:
|
||||
parameters = helper_sql.sqlSubmitQueue.get()
|
||||
|
@ -461,11 +575,30 @@ class sqlThread(threading.Thread):
|
|||
rowcount = self.cur.rowcount
|
||||
except Exception as err:
|
||||
if str(err) == 'database or disk is full':
|
||||
logger.fatal('(while cur.execute) Alert: Your disk or data storage volume is full. sqlThread will now exit.')
|
||||
queues.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
|
||||
logger.fatal(
|
||||
'(while cur.execute) Alert: Your disk or data storage volume is full.'
|
||||
' sqlThread will now exit.')
|
||||
queues.UISignalQueue.put((
|
||||
'alert', (
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
"Disk full"),
|
||||
tr._translate(
|
||||
"MainWindow",
|
||||
'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'),
|
||||
True)))
|
||||
os._exit(0)
|
||||
else:
|
||||
logger.fatal('Major error occurred when trying to execute a SQL statement within the sqlThread. Please tell Atheros about this error message or post it in the forum! Error occurred while trying to execute statement: "%s" Here are the parameters; you might want to censor this data with asterisks (***) as it can contain private information: %s. Here is the actual error message thrown by the sqlThread: %s', str(item), str(repr(parameters)), str(err))
|
||||
logger.fatal(
|
||||
'Major error occurred when trying to execute a SQL statement within the sqlThread.'
|
||||
' Please tell Atheros about this error message or post it in the forum!'
|
||||
' Error occurred while trying to execute statement: "%s" Here are the parameters;'
|
||||
' you might want to censor this data with asterisks (***)'
|
||||
' as it can contain private information: %s.'
|
||||
' Here is the actual error message thrown by the sqlThread: %s',
|
||||
str(item),
|
||||
str(repr(parameters)),
|
||||
str(err))
|
||||
logger.fatal('This program shall now abruptly exit!')
|
||||
|
||||
os._exit(0)
|
||||
|
|
93 src/debug.py
|
@ -1,26 +1,38 @@
|
|||
"""
|
||||
Logging and debugging facility
|
||||
=============================
|
||||
-----------------------------
|
||||
|
||||
Levels:
|
||||
|
||||
DEBUG
|
||||
Detailed information, typically of interest only when diagnosing problems.
|
||||
INFO
|
||||
Confirmation that things are working as expected.
|
||||
WARNING
|
||||
An indication that something unexpected happened, or indicative of some problem in the
|
||||
near future (e.g. 'disk space low'). The software is still working as expected.
|
||||
ERROR
|
||||
Due to a more serious problem, the software has not been able to perform some function.
|
||||
CRITICAL
|
||||
A serious error, indicating that the program itself may be unable to continue running.
|
||||
DEBUG
|
||||
Detailed information, typically of interest only when diagnosing problems.
|
||||
INFO
|
||||
Confirmation that things are working as expected.
|
||||
WARNING
|
||||
An indication that something unexpected happened, or indicative of
|
||||
some problem in the near future (e.g. 'disk space low'). The software
|
||||
is still working as expected.
|
||||
ERROR
|
||||
Due to a more serious problem, the software has not been able to
|
||||
perform some function.
|
||||
CRITICAL
|
||||
A serious error, indicating that the program itself may be unable to
|
||||
continue running.
|
||||
|
||||
There are three loggers: `console_only`, `file_only` and `both`.
|
||||
There are three loggers by default: `console_only`, `file_only` and `both`.
|
||||
You can configure logging in the logging.dat in the appdata dir.
|
||||
Its format is described in the :func:`logging.config.fileConfig` doc.
|
||||
|
||||
Use: `from debug import logger` to import this facility into whatever module you wish to log messages from.
|
||||
Logging is thread-safe so you don't have to worry about locks, just import and log.
|
||||
Use:
|
||||
|
||||
>>> import logging
|
||||
>>> logger = logging.getLogger('default')
|
||||
|
||||
The old form ``from debug import logger`` may also be used,
|
||||
but only in the top level modules.
|
||||
|
||||
Logging is thread-safe so you don't have to worry about locks,
|
||||
just import and log.
|
||||
"""
|
||||
|
||||
import ConfigParser
|
||||
|
@ -28,6 +40,7 @@ import logging
|
|||
import logging.config
|
||||
import os
|
||||
import sys
|
||||
|
||||
import helper_startup
|
||||
import state
|
||||
|
||||
|
@ -40,14 +53,22 @@ log_level = 'WARNING'
|
|||
|
||||
|
||||
def log_uncaught_exceptions(ex_cls, ex, tb):
|
||||
"""The last resort logging function used for sys.excepthook"""
|
||||
logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb))
|
||||
|
||||
|
||||
def configureLogging():
|
||||
"""
|
||||
Configure logging,
|
||||
using either logging.dat file in the state.appdata dir
|
||||
or dictionary with hardcoded settings.
|
||||
"""
|
||||
sys.excepthook = log_uncaught_exceptions
|
||||
fail_msg = ''
|
||||
try:
|
||||
logging_config = os.path.join(state.appdata, 'logging.dat')
|
||||
logging.config.fileConfig(logging_config)
|
||||
logging.config.fileConfig(
|
||||
logging_config, disable_existing_loggers=False)
|
||||
return (
|
||||
False,
|
||||
'Loaded logger configuration from %s' % logging_config
|
||||
|
@ -59,12 +80,11 @@ def configureLogging():
|
|||
' logging config\n%s' % \
|
||||
(logging_config, sys.exc_info())
|
||||
else:
|
||||
# no need to confuse the user if the logger config is missing entirely
|
||||
# no need to confuse the user if the logger config
|
||||
# is missing entirely
|
||||
fail_msg = 'Using default logger configuration'
|
||||
|
||||
sys.excepthook = log_uncaught_exceptions
|
||||
|
||||
logging.config.dictConfig({
|
||||
logging_config = {
|
||||
'version': 1,
|
||||
'formatters': {
|
||||
'default': {
|
||||
|
@ -106,34 +126,29 @@ def configureLogging():
|
|||
'level': log_level,
|
||||
'handlers': ['console'],
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
logging_config['loggers']['default'] = logging_config['loggers'][
|
||||
'file_only' if '-c' in sys.argv else 'both']
|
||||
logging.config.dictConfig(logging_config)
|
||||
|
||||
return True, fail_msg
|
||||
|
||||
|
||||
def initLogging():
|
||||
preconfigured, msg = configureLogging()
|
||||
if preconfigured:
|
||||
if '-c' in sys.argv:
|
||||
logger = logging.getLogger('file_only')
|
||||
else:
|
||||
logger = logging.getLogger('both')
|
||||
else:
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
if msg:
|
||||
logger.log(logging.WARNING if preconfigured else logging.INFO, msg)
|
||||
return logger
|
||||
|
||||
|
||||
def resetLogging():
|
||||
"""Reconfigure logging in runtime when state.appdata dir changed"""
|
||||
# pylint: disable=global-statement, used-before-assignment
|
||||
global logger
|
||||
for i in logger.handlers.iterkeys():
|
||||
for i in logger.handlers:
|
||||
logger.removeHandler(i)
|
||||
i.flush()
|
||||
i.close()
|
||||
logger = initLogging()
|
||||
configureLogging()
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
# !
|
||||
logger = initLogging()
|
||||
preconfigured, msg = configureLogging()
|
||||
logger = logging.getLogger('default')
|
||||
if msg:
|
||||
logger.log(logging.WARNING if preconfigured else logging.INFO, msg)
|
||||
|
|
|
@ -1,24 +1,24 @@
|
|||
"""
|
||||
src/defaults.py
|
||||
===============
|
||||
Common default values
|
||||
"""
|
||||
|
||||
# sanity check, prevent doing ridiculous PoW
|
||||
# 20 million PoWs equals approximately 2 days on dev's dual R9 290
|
||||
#: sanity check, prevent doing ridiculous PoW
|
||||
#: 20 million PoWs equals approximately 2 days on dev's dual R9 290
|
||||
ridiculousDifficulty = 20000000
|
||||
|
||||
# Remember here the RPC port read from namecoin.conf so we can restore to
|
||||
# it as default whenever the user changes the "method" selection for
|
||||
# namecoin integration to "namecoind".
|
||||
#: Remember here the RPC port read from namecoin.conf so we can restore to
|
||||
#: it as default whenever the user changes the "method" selection for
|
||||
#: namecoin integration to "namecoind".
|
||||
namecoinDefaultRpcPort = "8336"
|
||||
|
||||
# If changed, these values will cause particularly unexpected behavior:
|
||||
# You won't be able to either send or receive messages because the proof
|
||||
# of work you do (or demand) won't match that done or demanded by others.
|
||||
# Don't change them!
|
||||
# The amount of work that should be performed (and demanded) per byte of the payload.
|
||||
#: The amount of work that should be performed (and demanded) per byte
|
||||
#: of the payload.
|
||||
networkDefaultProofOfWorkNonceTrialsPerByte = 1000
|
||||
# To make sending short messages a little more difficult, this value is
|
||||
# added to the payload length for use in calculating the proof of work
|
||||
# target.
|
||||
#: To make sending short messages a little more difficult, this value is
|
||||
#: added to the payload length for use in calculating the proof of work
|
||||
#: target.
|
||||
networkDefaultPayloadLengthExtraBytes = 1000
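To see how these two defaults feed into the proof of work, here is a hedged sketch of the target calculation from the Bitmessage protocol description; the in-tree implementation lives elsewhere and may differ in rounding details.

def pow_target(payload_length, ttl,
               nonce_trials_per_byte=1000,  # networkDefaultProofOfWorkNonceTrialsPerByte
               extra_bytes=1000):           # networkDefaultPayloadLengthExtraBytes
    # 8 bytes are added to account for the nonce that is prepended to the payload
    length = payload_length + 8 + extra_bytes
    return 2 ** 64 // (nonce_trials_per_byte * (length + ttl * length // (2 ** 16)))

# Example: a 1000-byte payload with a 28-day TTL. The proof-of-work hash,
# taken as a 64-bit integer, must not exceed this target:
# pow_target(1000, 28 * 24 * 60 * 60)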
|
||||
|
|
|
@ -113,6 +113,7 @@ PACKAGES = {
|
|||
|
||||
|
||||
def detectOS():
|
||||
"""Finding out what Operating System is running"""
|
||||
if detectOS.result is not None:
|
||||
return detectOS.result
|
||||
if sys.platform.startswith('openbsd'):
|
||||
|
@ -132,6 +133,7 @@ detectOS.result = None
|
|||
|
||||
|
||||
def detectOSRelease():
|
||||
"""Detecting the release of OS"""
|
||||
with open("/etc/os-release", 'r') as osRelease:
|
||||
version = None
|
||||
for line in osRelease:
|
||||
|
@ -148,6 +150,7 @@ def detectOSRelease():
|
|||
|
||||
|
||||
def try_import(module, log_extra=False):
|
||||
"""Try to import the non imported packages"""
|
||||
try:
|
||||
return import_module(module)
|
||||
except ImportError:
|
||||
|
@ -208,10 +211,8 @@ def check_sqlite():
|
|||
).fetchone()[0]
|
||||
logger.info('SQLite Library Source ID: %s', sqlite_source_id)
|
||||
if sqlite_version_number >= 3006023:
|
||||
compile_options = ', '.join(map(
|
||||
lambda row: row[0],
|
||||
conn.execute('PRAGMA compile_options;')
|
||||
))
|
||||
compile_options = ', '.join(
|
||||
[row[0] for row in conn.execute('PRAGMA compile_options;')])
|
||||
logger.info(
|
||||
'SQLite Library Compile Options: %s', compile_options)
|
||||
# There is no specific version requirement as yet, so we just
|
||||
|
@ -230,13 +231,13 @@ def check_sqlite():
|
|||
conn.close()
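For reference, the facts that check_sqlite() logs can also be queried directly with the stdlib sqlite3 module; a small self-contained sketch:

import sqlite3

conn = sqlite3.connect(':memory:')
version = conn.execute('SELECT sqlite_version()').fetchone()[0]
source_id = conn.execute('SELECT sqlite_source_id()').fetchone()[0]
compile_options = ', '.join(
    row[0] for row in conn.execute('PRAGMA compile_options;'))
conn.close()
print('SQLite %s (%s): %s' % (version, source_id, compile_options))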
|
||||
|
||||
|
||||
def check_openssl():
|
||||
def check_openssl(): # pylint: disable=too-many-branches, too-many-return-statements
|
||||
"""Do openssl dependency check.
|
||||
|
||||
Here we check for openssl together with all of its dependent libraries
|
||||
and do version checking.
|
||||
"""
|
||||
|
||||
# pylint: disable=protected-access, redefined-outer-name
|
||||
ctypes = try_import('ctypes')
|
||||
if not ctypes:
|
||||
logger.error('Unable to check OpenSSL.')
|
||||
|
@ -303,7 +304,7 @@ def check_openssl():
|
|||
' ECDH, and ECDSA enabled.')
|
||||
return False
|
||||
matches = cflags_regex.findall(openssl_cflags)
|
||||
if len(matches) > 0:
|
||||
if matches:
|
||||
logger.error(
|
||||
'This OpenSSL library is missing the following required'
|
||||
' features: %s. PyBitmessage requires OpenSSL 0.9.8b'
|
||||
|
@ -314,13 +315,13 @@ def check_openssl():
|
|||
return False
|
||||
|
||||
|
||||
# TODO: The minimum versions of pythondialog and dialog need to be determined
|
||||
# ..todo:: The minimum versions of pythondialog and dialog need to be determined
|
||||
def check_curses():
|
||||
"""Do curses dependency check.
|
||||
|
||||
Here we are checking for curses if available or not with check
|
||||
as interface requires the pythondialog\ package and the dialog
|
||||
utility.
|
||||
Here we check whether curses is available, since the interface
|
||||
requires the `pythondialog <https://pypi.org/project/pythondialog>`_ package
|
||||
and the dialog utility.
|
||||
"""
|
||||
if sys.hexversion < 0x20600F0:
|
||||
logger.error(
|
||||
|
|
|
@ -1,13 +1,19 @@
|
|||
"""
|
||||
.. todo:: hello world
|
||||
Fallback expressions help PyBitmessage modules to run without some external
|
||||
dependencies.
|
||||
|
||||
|
||||
RIPEMD160Hash
|
||||
-------------
|
||||
|
||||
We need to check :mod:`hashlib` for RIPEMD-160, as it won't be available
|
||||
if OpenSSL is not linked against or the linked OpenSSL has RIPEMD disabled.
|
||||
Try to use `pycryptodome <https://pypi.org/project/pycryptodome/>`_
|
||||
in that case.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
|
||||
# We need to check hashlib for RIPEMD-160, as it won't be available
|
||||
# if OpenSSL is not linked against or the linked OpenSSL has RIPEMD
|
||||
# disabled.
|
||||
|
||||
try:
|
||||
hashlib.new('ripemd160')
|
||||
except ValueError:
|
||||
|
|
|
@ -1,22 +1,28 @@
|
|||
"""This module is for generating ack payload."""
|
||||
"""
|
||||
This module is for generating ack payload
|
||||
"""
|
||||
|
||||
from binascii import hexlify
|
||||
from struct import pack
|
||||
|
||||
import highlevelcrypto
|
||||
import helper_random
|
||||
from binascii import hexlify
|
||||
from struct import pack
|
||||
from addresses import encodeVarint
|
||||
|
||||
# This function generates payload objects for message acknowledgements
|
||||
# Several stealth levels are available depending on the privacy needs;
|
||||
# a higher level means better stealth, but also higher cost (size+POW)
|
||||
# - level 0: a random 32-byte sequence with a message header appended
|
||||
# - level 1: a getpubkey request for a (random) dummy key hash
|
||||
# - level 2: a standard message, encrypted to a random pubkey
|
||||
|
||||
|
||||
def genAckPayload(streamNumber=1, stealthLevel=0):
|
||||
"""Generate and return payload obj."""
|
||||
if (stealthLevel == 2): # Generate privacy-enhanced payload
|
||||
"""
|
||||
Generate and return payload obj.
|
||||
|
||||
This function generates payload objects for message acknowledgements
|
||||
Several stealth levels are available depending on the privacy needs;
|
||||
a higher level means better stealth, but also higher cost (size+POW)
|
||||
|
||||
- level 0: a random 32-byte sequence with a message header appended
|
||||
- level 1: a getpubkey request for a (random) dummy key hash
|
||||
- level 2: a standard message, encrypted to a random pubkey
|
||||
"""
|
||||
if stealthLevel == 2: # Generate privacy-enhanced payload
|
||||
# Generate a dummy privkey and derive the pubkey
|
||||
dummyPubKeyHex = highlevelcrypto.privToPub(
|
||||
hexlify(helper_random.randomBytes(32)))
|
||||
|
@ -29,7 +35,7 @@ def genAckPayload(streamNumber=1, stealthLevel=0):
|
|||
acktype = 2 # message
|
||||
version = 1
|
||||
|
||||
elif (stealthLevel == 1): # Basic privacy payload (random getpubkey)
|
||||
elif stealthLevel == 1: # Basic privacy payload (random getpubkey)
|
||||
ackdata = helper_random.randomBytes(32)
|
||||
acktype = 0 # getpubkey
|
||||
version = 4
|
||||
|
|
|
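A hedged usage sketch of the rewritten function (assuming the module name helper_ackPayload; stream number and stealth levels are illustrative):

>>> from helper_ackPayload import genAckPayload
>>> # level 0: cheapest, a random 32-byte blob with a message header
>>> ack0 = genAckPayload(streamNumber=1, stealthLevel=0)
>>> # level 2: most private, a real encrypted message object (more size + PoW)
>>> ack2 = genAckPayload(streamNumber=1, stealthLevel=2)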
@ -1,10 +1,19 @@
|
|||
"""
|
||||
Calculates bitcoin and testnet address from pubkey
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
|
||||
from debug import logger
|
||||
from pyelliptic import arithmetic
|
||||
|
||||
# This function expects that pubkey begin with \x04
|
||||
|
||||
def calculateBitcoinAddressFromPubkey(pubkey):
|
||||
"""Calculate bitcoin address from given pubkey (65 bytes long hex string)"""
|
||||
if len(pubkey) != 65:
|
||||
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
|
||||
logger.error('Could not calculate Bitcoin address from pubkey because'
|
||||
' function was passed a pubkey that was'
|
||||
' %i bytes long rather than 65.', len(pubkey))
|
||||
return "error"
|
||||
ripe = hashlib.new('ripemd160')
|
||||
sha = hashlib.new('sha256')
|
||||
|
@ -24,8 +33,11 @@ def calculateBitcoinAddressFromPubkey(pubkey):
|
|||
|
||||
|
||||
def calculateTestnetAddressFromPubkey(pubkey):
|
||||
"""This function expects that pubkey begin with the testnet prefix"""
|
||||
if len(pubkey) != 65:
|
||||
print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
|
||||
logger.error('Could not calculate Bitcoin address from pubkey because'
|
||||
' function was passed a pubkey that was'
|
||||
' %i bytes long rather than 65.', len(pubkey))
|
||||
return "error"
|
||||
ripe = hashlib.new('ripemd160')
|
||||
sha = hashlib.new('sha256')
|
||||
|
|
|
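For context, a self-contained sketch of the standard derivation these helpers follow (SHA-256, then RIPEMD-160, a version byte, a double SHA-256 checksum, Base58). It is a generic illustration, not the module's exact code; leading zero bytes, which Base58Check renders as '1' characters, are skipped for brevity.

import hashlib

B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def base58_encode(data):
    """Encode a byte string as Base58 (no leading-zero handling)."""
    num = int(data.encode('hex'), 16)
    encoded = ''
    while num > 0:
        num, rem = divmod(num, 58)
        encoded = B58[rem] + encoded
    return encoded


def bitcoin_address_from_pubkey(pubkey, version_byte='\x00'):
    """pubkey: a 65-byte uncompressed public key starting with byte 0x04."""
    ripe = hashlib.new('ripemd160', hashlib.sha256(pubkey).digest()).digest()
    payload = version_byte + ripe
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return base58_encode(payload + checksum)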
@ -1,10 +1,11 @@
|
|||
"""Helper Inbox performs inbox messagese related operations."""
|
||||
"""Helper Inbox performs inbox messages related operations"""
|
||||
|
||||
from helper_sql import sqlExecute, sqlQuery
|
||||
import queues
|
||||
from helper_sql import sqlExecute, sqlQuery
|
||||
|
||||
|
||||
def insert(t):
|
||||
"""Perform an insert into the "inbox" table"""
|
||||
sqlExecute('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?,?)''', *t)
|
||||
# shouldn't emit changedInboxUnread and displayNewInboxMessage
|
||||
# at the same time
|
||||
|
@ -12,11 +13,13 @@ def insert(t):
|
|||
|
||||
|
||||
def trash(msgid):
|
||||
"""Mark a message in the `inbox` as `trash`"""
|
||||
sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', msgid)
|
||||
queues.UISignalQueue.put(('removeInboxRowByMsgid', msgid))
|
||||
|
||||
|
||||
def isMessageAlreadyInInbox(sigHash):
|
||||
"""Check for previous instances of this message"""
|
||||
queryReturn = sqlQuery(
|
||||
'''SELECT COUNT(*) FROM inbox WHERE sighash=?''', sigHash)
|
||||
return queryReturn[0][0] != 0
|
||||
|
|
|
@ -25,19 +25,24 @@ BITMESSAGE_ENCODING_EXTENDED = 3
|
|||
|
||||
|
||||
class MsgEncodeException(Exception):
|
||||
"""Exception during message encoding"""
|
||||
pass
|
||||
|
||||
|
||||
class MsgDecodeException(Exception):
|
||||
"""Exception during message decoding"""
|
||||
pass
|
||||
|
||||
|
||||
class DecompressionSizeException(MsgDecodeException):
|
||||
# pylint: disable=super-init-not-called
|
||||
"""Decompression resulted in too much data (attack protection)"""
|
||||
def __init__(self, size):
|
||||
self.size = size
|
||||
|
||||
|
||||
class MsgEncode(object):
|
||||
"""Message encoder class"""
|
||||
def __init__(self, message, encoding=BITMESSAGE_ENCODING_SIMPLE):
|
||||
self.data = None
|
||||
self.encoding = encoding
|
||||
|
@ -52,6 +57,7 @@ class MsgEncode(object):
|
|||
raise MsgEncodeException("Unknown encoding %i" % (encoding))
|
||||
|
||||
def encodeExtended(self, message):
|
||||
"""Handle extended encoding"""
|
||||
try:
|
||||
msgObj = messagetypes.message.Message()
|
||||
self.data = zlib.compress(msgpack.dumps(msgObj.encode(message)), 9)
|
||||
|
@ -64,15 +70,18 @@ class MsgEncode(object):
|
|||
self.length = len(self.data)
|
||||
|
||||
def encodeSimple(self, message):
|
||||
"""Handle simple encoding"""
|
||||
self.data = 'Subject:%(subject)s\nBody:%(body)s' % message
|
||||
self.length = len(self.data)
|
||||
|
||||
def encodeTrivial(self, message):
|
||||
"""Handle trivial encoding"""
|
||||
self.data = message['body']
|
||||
self.length = len(self.data)
|
||||
|
||||
|
||||
class MsgDecode(object):
|
||||
"""Message decoder class"""
|
||||
def __init__(self, encoding, data):
|
||||
self.encoding = encoding
|
||||
if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
|
||||
|
@ -88,6 +97,7 @@ class MsgDecode(object):
|
|||
self.subject = _translate("MsgDecode", "Unknown encoding")
|
||||
|
||||
def decodeExtended(self, data):
|
||||
"""Handle extended encoding"""
|
||||
dc = zlib.decompressobj()
|
||||
tmp = ""
|
||||
while len(tmp) <= BMConfigParser().safeGetInt("zlib", "maxsize"):
|
||||
|
@ -131,6 +141,7 @@ class MsgDecode(object):
|
|||
self.body = msgObj.body
|
||||
|
||||
def decodeSimple(self, data):
|
||||
"""Handle simple encoding"""
|
||||
bodyPositionIndex = string.find(data, '\nBody:')
|
||||
if bodyPositionIndex > 1:
|
||||
subject = data[8:bodyPositionIndex]
|
||||
|
|
|
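A hedged round-trip sketch of these classes (assuming the module name helper_msgcoding):

>>> from helper_msgcoding import (
...     MsgEncode, MsgDecode, BITMESSAGE_ENCODING_SIMPLE)
>>> enc = MsgEncode({'subject': 'hi', 'body': 'hello world'},
...                 BITMESSAGE_ENCODING_SIMPLE)
>>> dec = MsgDecode(BITMESSAGE_ENCODING_SIMPLE, enc.data)
>>> # dec.subject == 'hi' and dec.body == 'hello world'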
@ -56,8 +56,7 @@ def randomrandrange(x, y=None):
|
|||
"""
|
||||
if isinstance(y, NoneType):
|
||||
return random.randrange(x) # nosec
|
||||
else:
|
||||
return random.randrange(x, y) # nosec
|
||||
return random.randrange(x, y) # nosec
|
||||
|
||||
|
||||
def randomchoice(population):
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#!/usr/bin/python2.7
|
||||
"""Additional SQL helper for searching messages"""
|
||||
|
||||
from helper_sql import *
|
||||
from helper_sql import sqlQuery
|
||||
|
||||
try:
|
||||
from PyQt4 import QtGui
|
||||
|
@ -8,13 +8,17 @@ try:
|
|||
except ImportError:
|
||||
haveQt = False
|
||||
|
||||
def search_translate (context, text):
|
||||
|
||||
def search_translate(context, text):
|
||||
"""Translation wrapper"""
|
||||
if haveQt:
|
||||
return QtGui.QApplication.translate(context, text)
|
||||
else:
|
||||
return text.lower()
|
||||
return text.lower()
|
||||
|
||||
def search_sql(xAddress = "toaddress", account = None, folder = "inbox", where = None, what = None, unreadOnly = False):
|
||||
|
||||
def search_sql(xAddress="toaddress", account=None, folder="inbox", where=None, what=None, unreadOnly=False):
|
||||
"""Perform a search in mailbox tables"""
|
||||
# pylint: disable=too-many-arguments, too-many-branches
|
||||
if what is not None and what != "":
|
||||
what = "%" + what + "%"
|
||||
if where == search_translate("MainWindow", "To"):
|
||||
|
@ -32,7 +36,7 @@ def search_sql(xAddress = "toaddress", account = None, folder = "inbox", where =
|
|||
|
||||
if folder == "sent":
|
||||
sqlStatementBase = '''
|
||||
SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime
|
||||
SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime
|
||||
FROM sent '''
|
||||
else:
|
||||
sqlStatementBase = '''SELECT folder, msgid, toaddress, fromaddress, subject, received, read
|
||||
|
@ -62,13 +66,16 @@ def search_sql(xAddress = "toaddress", account = None, folder = "inbox", where =
|
|||
sqlArguments.append(what)
|
||||
if unreadOnly:
|
||||
sqlStatementParts.append("read = 0")
|
||||
if len(sqlStatementParts) > 0:
|
||||
if sqlStatementParts:
|
||||
sqlStatementBase += "WHERE " + " AND ".join(sqlStatementParts)
|
||||
if folder == "sent":
|
||||
sqlStatementBase += " ORDER BY lastactiontime"
|
||||
return sqlQuery(sqlStatementBase, sqlArguments)
|
||||
|
||||
def check_match(toAddress, fromAddress, subject, message, where = None, what = None):
|
||||
|
||||
def check_match(toAddress, fromAddress, subject, message, where=None, what=None):
|
||||
"""Check if a single message matches a filter (used when new messages are added to messagelists)"""
|
||||
# pylint: disable=too-many-arguments
|
||||
if what is not None and what != "":
|
||||
if where in (search_translate("MainWindow", "To"), search_translate("MainWindow", "All")):
|
||||
if what.lower() not in toAddress.lower():
|
||||
|
|
|
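A hedged usage sketch of the cleaned-up helpers (account and search term are illustrative):

>>> from helper_search import search_sql
>>> # every unread message in the inbox
>>> unread = search_sql(folder="inbox", unreadOnly=True)
>>> # everything sent from one of our identities ("account" is an address string)
>>> outgoing = search_sql(xAddress="fromaddress", account="BM-...", folder="sent")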
@ -1,4 +1,9 @@
|
|||
"""
|
||||
Insert values into sent table
|
||||
"""
|
||||
|
||||
from helper_sql import *
|
||||
|
||||
def insert(t):
|
||||
"""Perform an insert into the `sent` table"""
|
||||
sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)
|
||||
|
|
|
@ -1,17 +1,39 @@
|
|||
"""Helper Sql performs sql operations."""
|
||||
"""
|
||||
SQL-related functions defined here really just pass the queries (or other SQL
|
||||
commands) to :class:`.threads.sqlThread` through the `sqlSubmitQueue` queue and check
|
||||
or return the result received from `sqlReturnQueue`.
|
||||
|
||||
This is done that way because :mod:`sqlite3` is so thread-unsafe that they
|
||||
won't even let you call it from different threads using your own locks.
|
||||
SQLite objects can only be used from one thread.
|
||||
|
||||
.. note:: This actually only applies for certain deployments, and/or
|
||||
really old versions of sqlite. I haven't actually seen it anywhere.
|
||||
Current versions do have support for threading and multiprocessing.
|
||||
I don't see an urgent reason to refactor this, but it should be noted
|
||||
in the comment that the problem is mostly not valid. Sadly, last time
|
||||
I checked, there is no reliable way to check whether the library is
|
||||
or isn't thread-safe.
|
||||
"""
|
||||
|
||||
import threading
|
||||
import Queue
|
||||
|
||||
sqlSubmitQueue = Queue.Queue()
|
||||
# SQLITE3 is so thread-unsafe that they won't even let you call it from different threads using your own locks.
|
||||
# SQL objects #can only be called from one thread.
|
||||
"""the queue for SQL"""
|
||||
sqlReturnQueue = Queue.Queue()
|
||||
"""the queue for results"""
|
||||
sqlLock = threading.Lock()
|
||||
|
||||
|
||||
def sqlQuery(sqlStatement, *args):
|
||||
"""SQLLITE execute statement and return query."""
|
||||
"""
|
||||
Query sqlite and return results
|
||||
|
||||
:param str sqlStatement: SQL statement string
|
||||
:param list args: SQL query parameters
|
||||
:rtype: list
|
||||
"""
|
||||
sqlLock.acquire()
|
||||
sqlSubmitQueue.put(sqlStatement)
|
||||
|
||||
|
@ -28,6 +50,7 @@ def sqlQuery(sqlStatement, *args):
|
|||
|
||||
|
||||
def sqlExecuteChunked(sqlStatement, idCount, *args):
|
||||
"""Execute chunked SQL statement to avoid argument limit"""
|
||||
# SQLITE_MAX_VARIABLE_NUMBER,
|
||||
# unfortunately getting/setting isn't exposed to python
|
||||
sqlExecuteChunked.chunkSize = 999
|
||||
|
@ -58,6 +81,7 @@ def sqlExecuteChunked(sqlStatement, idCount, *args):
|
|||
|
||||
|
||||
def sqlExecute(sqlStatement, *args):
|
||||
"""Execute SQL statement (optionally with arguments)"""
|
||||
sqlLock.acquire()
|
||||
sqlSubmitQueue.put(sqlStatement)
|
||||
|
||||
|
@ -70,13 +94,15 @@ def sqlExecute(sqlStatement, *args):
|
|||
sqlLock.release()
|
||||
return rowcount
|
||||
|
||||
|
||||
def sqlStoredProcedure(procName):
|
||||
"""Schedule procName to be run"""
|
||||
sqlLock.acquire()
|
||||
sqlSubmitQueue.put(procName)
|
||||
sqlLock.release()
|
||||
|
||||
|
||||
class SqlBulkExecute:
|
||||
class SqlBulkExecute(object):
|
||||
"""This is used when you have to execute the same statement in a cycle."""
|
||||
|
||||
def __enter__(self):
|
||||
|
|
|
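A hedged usage sketch; these helpers only work once :class:`.threads.sqlThread` is running, because they merely shuttle statements through the queues described above (SqlBulkExecute.execute is assumed from the rest of the class, which is not shown here):

>>> from helper_sql import sqlQuery, sqlExecute, SqlBulkExecute
>>> sqlExecute("UPDATE inbox SET read = 1 WHERE folder = 'inbox'")
>>> unread_count = sqlQuery("SELECT COUNT(*) FROM inbox WHERE read = ?", 0)[0][0]
>>> with SqlBulkExecute() as sql:
...     for msgid in msgids_to_trash:  # msgids_to_trash: hypothetical list
...         sql.execute("UPDATE inbox SET folder = 'trash' WHERE msgid = ?", msgid)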
@ -1,13 +1,9 @@
|
|||
"""
|
||||
src/helper_startup.py
|
||||
=====================
|
||||
|
||||
Helper Start performs all the startup operations.
|
||||
Startup operations.
|
||||
"""
|
||||
# pylint: disable=too-many-branches,too-many-statements
|
||||
from __future__ import print_function
|
||||
|
||||
import ConfigParser
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
|
@ -19,28 +15,12 @@ import paths
|
|||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
||||
|
||||
# The user may de-select Portable Mode in the settings if they want
|
||||
# the config files to stay in the application data folder.
|
||||
StoreConfigFilesInSameDirectoryAsProgramByDefault = False
|
||||
|
||||
|
||||
def _loadTrustedPeer():
|
||||
try:
|
||||
trustedPeer = BMConfigParser().get('bitmessagesettings', 'trustedpeer')
|
||||
except ConfigParser.Error:
|
||||
# This probably means the trusted peer wasn't specified so we
|
||||
# can just leave it as None
|
||||
return
|
||||
try:
|
||||
host, port = trustedPeer.split(':')
|
||||
except ValueError:
|
||||
sys.exit(
|
||||
'Bad trustedpeer config setting! It should be set as'
|
||||
' trustedpeer=<hostname>:<portnumber>'
|
||||
)
|
||||
state.trustedPeer = state.Peer(host, int(port))
|
||||
|
||||
|
||||
def loadConfig():
|
||||
"""Load the config"""
|
||||
config = BMConfigParser()
|
||||
|
@ -135,8 +115,6 @@ def loadConfig():
|
|||
else:
|
||||
updateConfig()
|
||||
|
||||
_loadTrustedPeer()
|
||||
|
||||
|
||||
def updateConfig():
|
||||
"""Save the config"""
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
"""Helper threading perform all the threading operations."""
|
||||
|
||||
import threading
|
||||
from contextlib import contextmanager
|
||||
|
||||
import helper_random
|
||||
|
||||
try:
|
||||
import prctl
|
||||
except ImportError:
|
||||
def set_thread_name(name):
|
||||
"""Set the thread name for external use (visible from the OS)."""
|
||||
threading.current_thread().name = name
|
||||
else:
|
||||
def set_thread_name(name):
|
||||
"""Set a name for the thread for python internal use."""
|
||||
prctl.set_name(name)
|
||||
|
||||
def _thread_name_hack(self):
|
||||
set_thread_name(self.name)
|
||||
threading.Thread.__bootstrap_original__(self)
|
||||
|
||||
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
|
||||
threading.Thread._Thread__bootstrap = _thread_name_hack
|
||||
|
||||
|
||||
class StoppableThread(threading.Thread):
|
||||
name = None
|
||||
|
||||
def __init__(self, name=None):
|
||||
if name:
|
||||
self.name = name
|
||||
super(StoppableThread, self).__init__(name=self.name)
|
||||
self.initStop()
|
||||
helper_random.seed()
|
||||
|
||||
def initStop(self):
|
||||
self.stop = threading.Event()
|
||||
self._stopped = False
|
||||
|
||||
def stopThread(self):
|
||||
self._stopped = True
|
||||
self.stop.set()
|
||||
|
||||
|
||||
class BusyError(threading.ThreadError):
|
||||
pass
|
||||
|
||||
|
||||
@contextmanager
|
||||
def nonBlocking(lock):
|
||||
locked = lock.acquire(False)
|
||||
if not locked:
|
||||
raise BusyError
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
lock.release()
|
|
@ -1,6 +1,10 @@
|
|||
"""
|
||||
src/highlevelcrypto.py
|
||||
======================
|
||||
High level cryptographic functions based on `.pyelliptic` OpenSSL bindings.
|
||||
|
||||
.. note::
|
||||
Upstream pyelliptic was upgraded from SHA1 to SHA256 for signing.
|
||||
We must upgrade PyBitmessage gracefully.
|
||||
`More discussion. <https://github.com/yann2192/pyelliptic/issues/32>`_
|
||||
"""
|
||||
|
||||
from binascii import hexlify
|
||||
|
@ -12,12 +16,13 @@ from pyelliptic import arithmetic as a
|
|||
|
||||
|
||||
def makeCryptor(privkey):
|
||||
"""Return a private pyelliptic.ECC() instance"""
|
||||
"""Return a private `.pyelliptic.ECC` instance"""
|
||||
private_key = a.changebase(privkey, 16, 256, minlen=32)
|
||||
public_key = pointMult(private_key)
|
||||
privkey_bin = '\x02\xca\x00\x20' + private_key
|
||||
pubkey_bin = '\x02\xca\x00\x20' + public_key[1:-32] + '\x00\x20' + public_key[-32:]
|
||||
cryptor = pyelliptic.ECC(curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin)
|
||||
cryptor = pyelliptic.ECC(
|
||||
curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin)
|
||||
return cryptor
|
||||
|
||||
|
||||
|
@ -29,7 +34,7 @@ def hexToPubkey(pubkey):
|
|||
|
||||
|
||||
def makePubCryptor(pubkey):
|
||||
"""Return a public pyelliptic.ECC() instance"""
|
||||
"""Return a public `.pyelliptic.ECC` instance"""
|
||||
pubkey_bin = hexToPubkey(pubkey)
|
||||
return pyelliptic.ECC(curve='secp256k1', pubkey=pubkey_bin)
|
||||
|
||||
|
@ -43,7 +48,8 @@ def privToPub(privkey):
|
|||
|
||||
def encrypt(msg, hexPubkey):
|
||||
"""Encrypts message with hex public key"""
|
||||
return pyelliptic.ECC(curve='secp256k1').encrypt(msg, hexToPubkey(hexPubkey))
|
||||
return pyelliptic.ECC(curve='secp256k1').encrypt(
|
||||
msg, hexToPubkey(hexPubkey))
|
||||
|
||||
|
||||
def decrypt(msg, hexPrivkey):
|
||||
|
@ -52,36 +58,38 @@ def decrypt(msg, hexPrivkey):
|
|||
|
||||
|
||||
def decryptFast(msg, cryptor):
|
||||
"""Decrypts message with an existing pyelliptic.ECC.ECC object"""
|
||||
"""Decrypts message with an existing `.pyelliptic.ECC` object"""
|
||||
return cryptor.decrypt(msg)
|
||||
|
||||
|
||||
def sign(msg, hexPrivkey):
|
||||
"""Signs with hex private key"""
|
||||
# pyelliptic is upgrading from SHA1 to SHA256 for signing. We must
|
||||
# upgrade PyBitmessage gracefully.
|
||||
# https://github.com/yann2192/pyelliptic/pull/33
|
||||
# More discussion: https://github.com/yann2192/pyelliptic/issues/32
|
||||
digestAlg = BMConfigParser().safeGet('bitmessagesettings', 'digestalg', 'sha1')
|
||||
"""
|
||||
Signs with hex private key using SHA1 or SHA256 depending on
|
||||
"digestalg" setting
|
||||
"""
|
||||
digestAlg = BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'digestalg', 'sha1')
|
||||
if digestAlg == "sha1":
|
||||
# SHA1, this will eventually be deprecated
|
||||
return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
|
||||
return makeCryptor(hexPrivkey).sign(
|
||||
msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
|
||||
elif digestAlg == "sha256":
|
||||
# SHA256. Eventually this will become the default
|
||||
return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.EVP_sha256)
|
||||
else:
|
||||
raise ValueError("Unknown digest algorithm %s" % (digestAlg))
|
||||
raise ValueError("Unknown digest algorithm %s" % digestAlg)
|
||||
|
||||
|
||||
def verify(msg, sig, hexPubkey):
|
||||
"""Verifies with hex public key"""
|
||||
"""Verifies with hex public key using SHA1 or SHA256"""
|
||||
# As mentioned above, we must upgrade gracefully to use SHA256. So
|
||||
# let us check the signature using both SHA1 and SHA256 and if one
|
||||
# of them passes then we will be satisfied. Eventually this can
|
||||
# be simplified and we'll only check with SHA256.
|
||||
try:
|
||||
# old SHA1 algorithm.
|
||||
sigVerifyPassed = makePubCryptor(hexPubkey).verify(sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
|
||||
sigVerifyPassed = makePubCryptor(hexPubkey).verify(
|
||||
sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
|
||||
except:
|
||||
sigVerifyPassed = False
|
||||
if sigVerifyPassed:
|
||||
|
@ -89,7 +97,8 @@ def verify(msg, sig, hexPubkey):
|
|||
return True
|
||||
# The signature check using SHA1 failed. Let us try it with SHA256.
|
||||
try:
|
||||
return makePubCryptor(hexPubkey).verify(sig, msg, digest_alg=OpenSSL.EVP_sha256)
|
||||
return makePubCryptor(hexPubkey).verify(
|
||||
sig, msg, digest_alg=OpenSSL.EVP_sha256)
|
||||
except:
|
||||
return False
|
||||
|
||||
|
@ -100,13 +109,14 @@ def pointMult(secret):
|
|||
|
||||
Evidently, this type of error can occur very rarely:
|
||||
|
||||
File "highlevelcrypto.py", line 54, in pointMult
|
||||
group = OpenSSL.EC_KEY_get0_group(k)
|
||||
WindowsError: exception: access violation reading 0x0000000000000008
|
||||
>>> File "highlevelcrypto.py", line 54, in pointMult
|
||||
>>> group = OpenSSL.EC_KEY_get0_group(k)
|
||||
>>> WindowsError: exception: access violation reading 0x0000000000000008
|
||||
"""
|
||||
while True:
|
||||
try:
|
||||
k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
|
||||
k = OpenSSL.EC_KEY_new_by_curve_name(
|
||||
OpenSSL.get_curve('secp256k1'))
|
||||
priv_key = OpenSSL.BN_bin2bn(secret, 32, None)
|
||||
group = OpenSSL.EC_KEY_get0_group(k)
|
||||
pub_key = OpenSSL.EC_POINT_new(group)
|
||||
|
|
|
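A hedged end-to-end sketch of these helpers; the key material is random and illustrative, and the digest used for signing follows the "digestalg" setting as described above:

>>> import os
>>> from binascii import hexlify
>>> import highlevelcrypto
>>> privkey = hexlify(os.urandom(32))
>>> pubkey = highlevelcrypto.privToPub(privkey)
>>> sig = highlevelcrypto.sign('test payload', privkey)
>>> highlevelcrypto.verify('test payload', sig, pubkey)  # -> True
>>> encrypted = highlevelcrypto.encrypt('secret', pubkey)
>>> highlevelcrypto.decrypt(encrypted, privkey)  # -> 'secret'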
@ -3,6 +3,7 @@ Manipulations with knownNodes dictionary.
|
|||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pickle
|
||||
import threading
|
||||
|
@ -10,28 +11,33 @@ import time
|
|||
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from network.node import Peer
|
||||
|
||||
knownNodesLock = threading.Lock()
|
||||
"""Thread lock for knownnodes modification"""
|
||||
knownNodes = {stream: {} for stream in range(1, 4)}
|
||||
"""The dict of known nodes for each stream"""
|
||||
|
||||
knownNodesTrimAmount = 2000
|
||||
"""trim stream knownnodes dict to this length"""
|
||||
|
||||
# forget a node after rating is this low
|
||||
knownNodesForgetRating = -0.5
|
||||
"""forget a node after rating is this low"""
|
||||
|
||||
knownNodesActual = False
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
DEFAULT_NODES = (
|
||||
state.Peer('5.45.99.75', 8444),
|
||||
state.Peer('75.167.159.54', 8444),
|
||||
state.Peer('95.165.168.168', 8444),
|
||||
state.Peer('85.180.139.241', 8444),
|
||||
state.Peer('158.222.217.190', 8080),
|
||||
state.Peer('178.62.12.187', 8448),
|
||||
state.Peer('24.188.198.204', 8111),
|
||||
state.Peer('109.147.204.113', 1195),
|
||||
state.Peer('178.11.46.221', 8444)
|
||||
Peer('5.45.99.75', 8444),
|
||||
Peer('75.167.159.54', 8444),
|
||||
Peer('95.165.168.168', 8444),
|
||||
Peer('85.180.139.241', 8444),
|
||||
Peer('158.222.217.190', 8080),
|
||||
Peer('178.62.12.187', 8448),
|
||||
Peer('24.188.198.204', 8111),
|
||||
Peer('109.147.204.113', 1195),
|
||||
Peer('178.11.46.221', 8444)
|
||||
)
|
||||
|
||||
|
||||
|
@ -57,19 +63,17 @@ def json_deserialize_knownnodes(source):
|
|||
for node in json.load(source):
|
||||
peer = node['peer']
|
||||
info = node['info']
|
||||
peer = state.Peer(str(peer['host']), peer.get('port', 8444))
|
||||
peer = Peer(str(peer['host']), peer.get('port', 8444))
|
||||
knownNodes[node['stream']][peer] = info
|
||||
|
||||
if (
|
||||
not (knownNodesActual or info.get('self')) and
|
||||
peer not in DEFAULT_NODES
|
||||
):
|
||||
if not (knownNodesActual
|
||||
or info.get('self')) and peer not in DEFAULT_NODES:
|
||||
knownNodesActual = True
|
||||
|
||||
|
||||
def pickle_deserialize_old_knownnodes(source):
|
||||
"""
|
||||
Unpickle source and reorganize knownnodes dict if it's in old format
|
||||
Unpickle source and reorganize knownnodes dict if it has the old format;
|
||||
the old format was {Peer:lastseen, ...}
|
||||
the new format is {Peer:{"lastseen":i, "rating":f}}
|
||||
"""
|
||||
|
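For reference, a hedged sketch of a single entry in the JSON knownnodes.dat that json_deserialize_knownnodes() reads; the values are illustrative:

import json

sample = json.dumps([{
    "stream": 1,
    "peer": {"host": "5.45.99.75", "port": 8444},
    "info": {"lastseen": 1546300800, "rating": 0.1, "self": False},
}])
# Feeding a file-like object wrapping `sample` to json_deserialize_knownnodes()
# would register this peer under knownNodes[1].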
@ -82,6 +86,7 @@ def pickle_deserialize_old_knownnodes(source):
|
|||
|
||||
|
||||
def saveKnownNodes(dirName=None):
|
||||
"""Save knownnodes to filesystem"""
|
||||
if dirName is None:
|
||||
dirName = state.appdata
|
||||
with knownNodesLock:
|
||||
|
@ -90,6 +95,7 @@ def saveKnownNodes(dirName=None):
|
|||
|
||||
|
||||
def addKnownNode(stream, peer, lastseen=None, is_self=False):
|
||||
"""Add a new node to the dict"""
|
||||
knownNodes[stream][peer] = {
|
||||
"lastseen": lastseen or time.time(),
|
||||
"rating": 1 if is_self else 0,
|
||||
|
@ -98,6 +104,7 @@ def addKnownNode(stream, peer, lastseen=None, is_self=False):
|
|||
|
||||
|
||||
def createDefaultKnownNodes():
|
||||
"""Creating default Knownnodes"""
|
||||
past = time.time() - 2418600 # 28 days - 10 min
|
||||
for peer in DEFAULT_NODES:
|
||||
addKnownNode(1, peer, past)
|
||||
|
@ -105,6 +112,7 @@ def createDefaultKnownNodes():
|
|||
|
||||
|
||||
def readKnownNodes():
|
||||
"""Load knownnodes from filesystem"""
|
||||
try:
|
||||
with open(state.appdata + 'knownnodes.dat', 'rb') as source:
|
||||
with knownNodesLock:
|
||||
|
@ -125,12 +133,13 @@ def readKnownNodes():
|
|||
if onionhostname and ".onion" in onionhostname:
|
||||
onionport = config.safeGetInt('bitmessagesettings', 'onionport')
|
||||
if onionport:
|
||||
self_peer = state.Peer(onionhostname, onionport)
|
||||
self_peer = Peer(onionhostname, onionport)
|
||||
addKnownNode(1, self_peer, is_self=True)
|
||||
state.ownAddresses[self_peer] = True
|
||||
|
||||
|
||||
def increaseRating(peer):
|
||||
"""Increase rating of a peer node"""
|
||||
increaseAmount = 0.1
|
||||
maxRating = 1
|
||||
with knownNodesLock:
|
||||
|
@ -145,6 +154,7 @@ def increaseRating(peer):
|
|||
|
||||
|
||||
def decreaseRating(peer):
|
||||
"""Decrease rating of a peer node"""
|
||||
decreaseAmount = 0.1
|
||||
minRating = -1
|
||||
with knownNodesLock:
|
||||
|
@ -159,6 +169,7 @@ def decreaseRating(peer):
|
|||
|
||||
|
||||
def trimKnownNodes(recAddrStream=1):
|
||||
"""Triming Knownnodes"""
|
||||
if len(knownNodes[recAddrStream]) < \
|
||||
BMConfigParser().safeGetInt("knownnodes", "maxnodes"):
|
||||
return
|
||||
|
@ -175,7 +186,7 @@ def dns():
|
|||
"""Add DNS names to knownnodes"""
|
||||
for port in [8080, 8444]:
|
||||
addKnownNode(
|
||||
1, state.Peer('bootstrap%s.bitmessage.org' % port, port))
|
||||
1, Peer('bootstrap%s.bitmessage.org' % port, port))
|
||||
|
||||
|
||||
def cleanupKnownNodes():
|
||||
|
@ -201,8 +212,8 @@ def cleanupKnownNodes():
|
|||
del knownNodes[stream][node]
|
||||
continue
|
||||
# scrap old nodes (age > 3 hours) with low rating
|
||||
if (age > 10800 and knownNodes[stream][node]["rating"] <=
|
||||
knownNodesForgetRating):
|
||||
if (age > 10800 and knownNodes[stream][node]["rating"]
|
||||
<= knownNodesForgetRating):
|
||||
needToWriteKnownNodesToDisk = True
|
||||
del knownNodes[stream][node]
|
||||
continue
|
||||
|
|
51
src/l10n.py
|
@ -1,4 +1,6 @@
|
|||
|
||||
"""
|
||||
Localization
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
@ -6,8 +8,7 @@ import time
|
|||
from bmconfigparser import BMConfigParser
|
||||
|
||||
|
||||
#logger = logging.getLogger(__name__)
|
||||
logger = logging.getLogger('file_only')
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
DEFAULT_ENCODING = 'ISO8859-1'
|
||||
|
@ -50,7 +51,7 @@ except:
|
|||
|
||||
if BMConfigParser().has_option('bitmessagesettings', 'timeformat'):
|
||||
time_format = BMConfigParser().get('bitmessagesettings', 'timeformat')
|
||||
#Test the format string
|
||||
# Test the format string
|
||||
try:
|
||||
time.strftime(time_format)
|
||||
except:
|
||||
|
@ -59,48 +60,52 @@ if BMConfigParser().has_option('bitmessagesettings', 'timeformat'):
|
|||
else:
|
||||
time_format = DEFAULT_TIME_FORMAT
|
||||
|
||||
#It seems some systems lie about the encoding they use so we perform
|
||||
#comprehensive decoding tests
|
||||
# It seems some systems lie about the encoding they use so we perform
|
||||
# comprehensive decoding tests
|
||||
if time_format != DEFAULT_TIME_FORMAT:
|
||||
try:
|
||||
#Check day names
|
||||
# Check day names
|
||||
for i in xrange(7):
|
||||
unicode(time.strftime(time_format, (0, 0, 0, 0, 0, 0, i, 0, 0)), encoding)
|
||||
#Check month names
|
||||
# Check month names
|
||||
for i in xrange(1, 13):
|
||||
unicode(time.strftime(time_format, (0, i, 0, 0, 0, 0, 0, 0, 0)), encoding)
|
||||
#Check AM/PM
|
||||
# Check AM/PM
|
||||
unicode(time.strftime(time_format, (0, 0, 0, 11, 0, 0, 0, 0, 0)), encoding)
|
||||
unicode(time.strftime(time_format, (0, 0, 0, 13, 0, 0, 0, 0, 0)), encoding)
|
||||
#Check DST
|
||||
# Check DST
|
||||
unicode(time.strftime(time_format, (0, 0, 0, 0, 0, 0, 0, 0, 1)), encoding)
|
||||
except:
|
||||
logger.exception('Could not decode locale formatted timestamp')
|
||||
time_format = DEFAULT_TIME_FORMAT
|
||||
encoding = DEFAULT_ENCODING
|
||||
|
||||
|
||||
def setlocale(category, newlocale):
|
||||
"""Set the locale"""
|
||||
locale.setlocale(category, newlocale)
|
||||
# it looks like some stuff isn't initialised yet when this is called the
|
||||
# first time and its init gets the locale settings from the environment
|
||||
os.environ["LC_ALL"] = newlocale
|
||||
|
||||
def formatTimestamp(timestamp = None, as_unicode = True):
|
||||
#For some reason some timestamps are strings so we need to sanitize.
|
||||
|
||||
def formatTimestamp(timestamp=None, as_unicode=True):
|
||||
"""Return a formatted timestamp"""
|
||||
# For some reason some timestamps are strings so we need to sanitize.
|
||||
if timestamp is not None and not isinstance(timestamp, int):
|
||||
try:
|
||||
timestamp = int(timestamp)
|
||||
except:
|
||||
timestamp = None
|
||||
|
||||
#timestamp can't be less than 0.
|
||||
# timestamp can't be less than 0.
|
||||
if timestamp is not None and timestamp < 0:
|
||||
timestamp = None
|
||||
|
||||
if timestamp is None:
|
||||
timestring = time.strftime(time_format)
|
||||
else:
|
||||
#In case timestamp is too far in the future
|
||||
# In case timestamp is too far in the future
|
||||
try:
|
||||
timestring = time.strftime(time_format, time.localtime(timestamp))
|
||||
except ValueError:
|
||||
|
@ -110,17 +115,21 @@ def formatTimestamp(timestamp = None, as_unicode = True):
|
|||
return unicode(timestring, encoding)
|
||||
return timestring
|
||||
|
||||
|
||||
def getTranslationLanguage():
|
||||
userlocale = None
|
||||
if BMConfigParser().has_option('bitmessagesettings', 'userlocale'):
|
||||
userlocale = BMConfigParser().get('bitmessagesettings', 'userlocale')
|
||||
"""Return the user's language choice"""
|
||||
userlocale = BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'userlocale', 'system')
|
||||
return userlocale if userlocale and userlocale != 'system' else language
|
||||
|
||||
if userlocale in [None, '', 'system']:
|
||||
return language
|
||||
|
||||
return userlocale
|
||||
|
||||
def getWindowsLocale(posixLocale):
|
||||
"""
|
||||
Get the Windows locale
|
||||
Technically this converts the locale string from UNIX to Windows format,
|
||||
because they use different ones in their
|
||||
libraries. E.g. "en_EN.UTF-8" to "english".
|
||||
"""
|
||||
if posixLocale in windowsLanguageMap:
|
||||
return windowsLanguageMap[posixLocale]
|
||||
if "." in posixLocale:
|
||||
|
|
|
@ -1,136 +0,0 @@
|
|||
# pylint: disable=too-many-locals
|
||||
"""
|
||||
This program can be used to print out everything in your Inbox or Sent folders and also take things out of the trash.
|
||||
Scroll down to the bottom to see the functions that you can uncomment. Save then run this file.
|
||||
The functions which only read the database file seem to function just
|
||||
fine even if you have Bitmessage running but you should definitly close
|
||||
it before running the functions that make changes (like taking items out
|
||||
of the trash).
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sqlite3
|
||||
from binascii import hexlify
|
||||
from time import strftime, localtime
|
||||
|
||||
import paths
|
||||
import queues
|
||||
|
||||
|
||||
appdata = paths.lookupAppdataFolder()
|
||||
|
||||
conn = sqlite3.connect(appdata + 'messages.dat')
|
||||
conn.text_factory = str
|
||||
cur = conn.cursor()
|
||||
|
||||
|
||||
def readInbox():
|
||||
"""Print each row from inbox table"""
|
||||
print 'Printing everything in inbox table:'
|
||||
item = '''select * from inbox'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
print row
|
||||
|
||||
|
||||
def readSent():
|
||||
"""Print each row from sent table"""
|
||||
print 'Printing everything in Sent table:'
|
||||
item = '''select * from sent where folder !='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
(msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime,
|
||||
sleeptill, status, retrynumber, folder, encodingtype, ttl) = row # pylint: disable=unused-variable
|
||||
print(hexlify(msgid), toaddress, 'toripe:', hexlify(toripe), 'fromaddress:', fromaddress, 'ENCODING TYPE:',
|
||||
encodingtype, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', hexlify(ackdata),
|
||||
lastactiontime, status, retrynumber, folder)
|
||||
|
||||
|
||||
def readSubscriptions():
|
||||
"""Print each row from subscriptions table"""
|
||||
print 'Printing everything in subscriptions table:'
|
||||
item = '''select * from subscriptions'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
print row
|
||||
|
||||
|
||||
def readPubkeys():
|
||||
"""Print each row from pubkeys table"""
|
||||
print 'Printing everything in pubkeys table:'
|
||||
item = '''select address, transmitdata, time, usedpersonally from pubkeys'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
address, transmitdata, time, usedpersonally = row
|
||||
print(
|
||||
'Address:', address, '\tTime first broadcast:', unicode(
|
||||
strftime('%a, %d %b %Y %I:%M %p', localtime(time)), 'utf-8'),
|
||||
'\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', hexlify(transmitdata),
|
||||
)
|
||||
|
||||
|
||||
def readInventory():
|
||||
"""Print each row from inventory table"""
|
||||
print 'Printing everything in inventory table:'
|
||||
item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
obj_hash, objecttype, streamnumber, payload, expirestime = row
|
||||
print 'Hash:', hexlify(obj_hash), objecttype, streamnumber, '\t', hexlify(payload), '\t', unicode(
|
||||
strftime('%a, %d %b %Y %I:%M %p', localtime(expirestime)), 'utf-8')
|
||||
|
||||
|
||||
def takeInboxMessagesOutOfTrash():
|
||||
"""Update all inbox messages with folder=trash to have folder=inbox"""
|
||||
item = '''update inbox set folder='inbox' where folder='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
_ = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
|
||||
|
||||
def takeSentMessagesOutOfTrash():
|
||||
"""Update all sent messages with folder=trash to have folder=sent"""
|
||||
item = '''update sent set folder='sent' where folder='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
_ = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
|
||||
|
||||
def markAllInboxMessagesAsUnread():
|
||||
"""Update all messages in inbox to have read=0"""
|
||||
item = '''update inbox set read='0' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
_ = cur.fetchall()
|
||||
conn.commit()
|
||||
queues.UISignalQueue.put(('changedInboxUnread', None))
|
||||
print 'done'
|
||||
|
||||
|
||||
def vacuum():
|
||||
"""Perform a vacuum on the database"""
|
||||
item = '''VACUUM'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
_ = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
readInbox()
|
|
@ -1,7 +1,4 @@
|
|||
"""
|
||||
src/messagetypes/__init__.py
|
||||
============================
|
||||
"""
|
||||
import logging
|
||||
from importlib import import_module
|
||||
from os import path, listdir
|
||||
from string import lower
|
||||
|
@ -9,12 +6,14 @@ try:
|
|||
from kivy.utils import platform
|
||||
except:
|
||||
platform = ''
|
||||
from debug import logger
|
||||
|
||||
import messagetypes
|
||||
import paths
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
class MsgBase(object): # pylint: disable=too-few-public-methods
|
||||
|
||||
class MsgBase(object): # pylint: disable=too-few-public-methods
|
||||
"""Base class for message types"""
|
||||
def __init__(self):
|
||||
self.data = {"": lower(type(self).__name__)}
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
"""
|
||||
src/messagetypes/message.py
|
||||
===========================
|
||||
"""
|
||||
from debug import logger
|
||||
import logging
|
||||
|
||||
from messagetypes import MsgBase
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class Message(MsgBase):
|
||||
"""Encapsulate a message"""
|
||||
|
|
|
@ -1,11 +1,10 @@
|
|||
"""
|
||||
src/messagetypes/vote.py
|
||||
========================
|
||||
"""
|
||||
from debug import logger
|
||||
import logging
|
||||
|
||||
from messagetypes import MsgBase
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class Vote(MsgBase):
|
||||
"""Module used to vote"""
|
||||
|
|
|
@ -1,31 +1,28 @@
|
|||
# pylint: disable=too-many-branches,protected-access
|
||||
"""
|
||||
Copyright (C) 2013 by Daniel Kraft <d@domob.eu>
|
||||
This file is part of the Bitmessage project.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
.. todo:: from debug import logger crashes PyBitmessage due to a circular dependency. The debug module will also
|
||||
override/disable logging.getLogger() # loggers so module level logging functions are used instead
|
||||
Namecoin queries
|
||||
"""
|
||||
# This file is part of the Bitmessage project.
|
||||
|
||||
from __future__ import absolute_import
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
import base64
|
||||
import httplib
|
||||
|
@ -258,7 +255,7 @@ class namecoinConnection(object):
|
|||
resp = self.con.getresponse()
|
||||
result = resp.read()
|
||||
if resp.status != 200:
|
||||
raise Exception("Namecoin returned status %i: %s" % resp.status, resp.reason)
|
||||
raise Exception("Namecoin returned status %i: %s" % (resp.status, resp.reason))
|
||||
except:
|
||||
logger.info("HTTP receive error")
|
||||
except:
|
||||
|
@ -288,7 +285,7 @@ class namecoinConnection(object):
|
|||
return result
|
||||
|
||||
except socket.error as exc:
|
||||
raise Exception("Socket error in RPC connection: %s" % str(exc))
|
||||
raise Exception("Socket error in RPC connection: %s" % exc)
|
||||
|
||||
|
||||
def lookupNamecoinFolder():
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
from addrthread import AddrThread
|
||||
from announcethread import AnnounceThread
|
||||
from connectionpool import BMConnectionPool
|
||||
from dandelion import Dandelion
|
||||
from downloadthread import DownloadThread
|
||||
from invthread import InvThread
|
||||
from networkthread import BMNetworkThread
|
||||
from receivequeuethread import ReceiveQueueThread
|
||||
from threads import StoppableThread
|
||||
from uploadthread import UploadThread
|
||||
|
||||
|
||||
__all__ = [
|
||||
"BMConnectionPool", "Dandelion",
|
||||
"AddrThread", "AnnounceThread", "BMNetworkThread", "DownloadThread",
|
||||
"InvThread", "ReceiveQueueThread", "UploadThread", "StoppableThread"
|
||||
]
|
|
@ -1,9 +1,14 @@
|
|||
"""
|
||||
Announce addresses as they are received from other hosts
|
||||
"""
|
||||
import Queue
|
||||
|
||||
from helper_threading import StoppableThread
|
||||
import state
|
||||
from helper_random import randomshuffle
|
||||
from network.assemble import assemble_addr
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from queues import addrQueue
|
||||
import state
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class AddrThread(StoppableThread):
|
||||
|
@ -15,15 +20,26 @@ class AddrThread(StoppableThread):
|
|||
while True:
|
||||
try:
|
||||
data = addrQueue.get(False)
|
||||
chunk.append((data[0], data[1]))
|
||||
if len(data) > 2:
|
||||
source = BMConnectionPool().getConnectionByAddr(data[2])
|
||||
chunk.append(data)
|
||||
except Queue.Empty:
|
||||
break
|
||||
except KeyError:
|
||||
continue
|
||||
|
||||
# finish
|
||||
if chunk:
|
||||
# Choose peers randomly
|
||||
connections = BMConnectionPool().establishedConnections()
|
||||
randomshuffle(connections)
|
||||
for i in connections:
|
||||
randomshuffle(chunk)
|
||||
filtered = []
|
||||
for stream, peer, seen, destination in chunk:
|
||||
# peer's own address or address received from peer
|
||||
if i.destination in (peer, destination):
|
||||
continue
|
||||
if stream not in i.streams:
|
||||
continue
|
||||
filtered.append((stream, peer, seen))
|
||||
if filtered:
|
||||
i.append_write_buf(assemble_addr(filtered))
|
||||
|
||||
addrQueue.iterate()
|
||||
for i in range(len(chunk)):
|
||||
|
|
|
@ -10,8 +10,7 @@ import time
|
|||
|
||||
import network.asyncore_pollchoose as asyncore
|
||||
import state
|
||||
from debug import logger
|
||||
from helper_threading import BusyError, nonBlocking
|
||||
from threads import BusyError, nonBlocking
|
||||
|
||||
|
||||
class ProcessingError(Exception):
|
||||
|
@ -84,7 +83,8 @@ class AdvancedDispatcher(asyncore.dispatcher):
|
|||
try:
|
||||
cmd = getattr(self, "state_" + str(self.state))
|
||||
except AttributeError:
|
||||
logger.error("Unknown state %s", self.state, exc_info=True)
|
||||
self.logger.error(
|
||||
'Unknown state %s', self.state, exc_info=True)
|
||||
raise UnknownStateError(self.state)
|
||||
if not cmd():
|
||||
break
|
||||
|
|
|
@ -2,22 +2,22 @@
|
|||
src/network/announcethread.py
|
||||
=================================
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import state
|
||||
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from network.bmproto import BMProto
|
||||
from network.assemble import assemble_addr
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.udp import UDPSocket
|
||||
import state
|
||||
from node import Peer
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class AnnounceThread(StoppableThread):
|
||||
"""A thread to manage regular announcing of this node"""
|
||||
def __init__(self):
|
||||
super(AnnounceThread, self).__init__(name="Announcer")
|
||||
logger.info("init announce thread")
|
||||
name = "Announcer"
|
||||
|
||||
def run(self):
|
||||
lastSelfAnnounced = 0
|
||||
|
@ -38,6 +38,8 @@ class AnnounceThread(StoppableThread):
|
|||
for stream in state.streamsInWhichIAmParticipating:
|
||||
addr = (
|
||||
stream,
|
||||
state.Peer('127.0.0.1', BMConfigParser().safeGetInt("bitmessagesettings", "port")),
|
||||
Peer(
|
||||
'127.0.0.1',
|
||||
BMConfigParser().safeGetInt('bitmessagesettings', 'port')),
|
||||
time.time())
|
||||
connection.append_write_buf(BMProto.assembleAddr([addr]))
|
||||
connection.append_write_buf(assemble_addr([addr]))
|
||||
|
|
32
src/network/assemble.py
Normal file
|
@ -0,0 +1,32 @@
|
|||
"""
|
||||
Create bitmessage protocol command packets
|
||||
"""
|
||||
|
||||
import struct
|
||||
|
||||
import addresses
|
||||
from network.constants import MAX_ADDR_COUNT
|
||||
from network.node import Peer
|
||||
from protocol import CreatePacket, encodeHost
|
||||
|
||||
|
||||
def assemble_addr(peerList):
|
||||
"""Create address command"""
|
||||
if isinstance(peerList, Peer):
|
||||
peerList = [peerList]  # wrap a single Peer in a list; bare parentheses would be a no-op
|
||||
if not peerList:
|
||||
return b''
|
||||
retval = b''
|
||||
for i in range(0, len(peerList), MAX_ADDR_COUNT):
|
||||
payload = addresses.encodeVarint(
|
||||
len(peerList[i:i + MAX_ADDR_COUNT]))
|
||||
for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:
|
||||
payload += struct.pack(
|
||||
'>Q', timestamp) # 64-bit time
|
||||
payload += struct.pack('>I', stream)
|
||||
payload += struct.pack(
|
||||
'>q', 1) # service bit flags offered by this node
|
||||
payload += encodeHost(peer.host)
|
||||
payload += struct.pack('>H', peer.port) # remote port
|
||||
retval += CreatePacket('addr', payload)
|
||||
return retval
|
|
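A hedged usage sketch, mirroring how AnnounceThread calls this further down:

>>> import time
>>> from network.assemble import assemble_addr
>>> from network.node import Peer
>>> packet = assemble_addr([(1, Peer('127.0.0.1', 8444), time.time())])
>>> # `packet` holds one or more raw "addr" protocol messages, ready to be
>>> # passed to a connection's append_write_buf()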
@ -1,18 +1,18 @@
|
|||
"""
|
||||
src/network/bmobject.py
|
||||
======================
|
||||
|
||||
BMObject and it's exceptions.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
|
||||
import protocol
|
||||
import state
|
||||
from addresses import calculateInventoryHash
|
||||
from debug import logger
|
||||
from inventory import Inventory
|
||||
from network.dandelion import Dandelion
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class BMObjectInsufficientPOWError(Exception):
|
||||
"""Exception indicating the object doesn't have sufficient proof of work."""
|
||||
|
|
|
@ -5,6 +5,7 @@ src/network/bmproto.py
|
|||
# pylint: disable=attribute-defined-outside-init
|
||||
import base64
|
||||
import hashlib
|
||||
import logging
|
||||
import socket
|
||||
import struct
|
||||
import time
|
||||
|
@ -16,20 +17,27 @@ import knownnodes
|
|||
import protocol
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from inventory import Inventory
|
||||
from network.advanceddispatcher import AdvancedDispatcher
|
||||
from network.constants import (
|
||||
ADDRESS_ALIVE,
|
||||
MAX_MESSAGE_SIZE,
|
||||
MAX_OBJECT_COUNT,
|
||||
MAX_OBJECT_PAYLOAD_SIZE,
|
||||
MAX_TIME_OFFSET)
|
||||
from network.dandelion import Dandelion
|
||||
from network.bmobject import (
|
||||
BMObject, BMObjectInsufficientPOWError, BMObjectInvalidDataError,
|
||||
BMObjectExpiredError, BMObjectUnwantedStreamError,
|
||||
BMObjectInvalidError, BMObjectAlreadyHaveError)
|
||||
from network.node import Node
|
||||
from network.proxy import ProxyError
|
||||
from node import Node, Peer
|
||||
from objectracker import missingObjects, ObjectTracker
|
||||
from queues import objectProcessorQueue, portCheckerQueue, invQueue, addrQueue
|
||||
from randomtrackingdict import RandomTrackingDict
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class BMProtoError(ProxyError):
|
||||
"""A Bitmessage Protocol Base Error"""
|
||||
|
@ -49,18 +57,6 @@ class BMProtoExcessiveDataError(BMProtoError):
|
|||
class BMProto(AdvancedDispatcher, ObjectTracker):
|
||||
"""A parser for the Bitmessage Protocol"""
|
||||
# pylint: disable=too-many-instance-attributes, too-many-public-methods
|
||||
# ~1.6 MB which is the maximum possible size of an inv message.
|
||||
maxMessageSize = 1600100
|
||||
# 2**18 = 256kB is the maximum size of an object payload
|
||||
maxObjectPayloadSize = 2**18
|
||||
# protocol specification says max 1000 addresses in one addr command
|
||||
maxAddrCount = 1000
|
||||
# protocol specification says max 50000 objects in one inv command
|
||||
maxObjectCount = 50000
|
||||
# address is online if online less than this many seconds ago
|
||||
addressAlive = 10800
|
||||
# maximum time offset
|
||||
maxTimeOffset = 3600
|
||||
timeOffsetWrongCount = 0
|
||||
|
||||
def __init__(self, address=None, sock=None): # pylint: disable=unused-argument, super-init-not-called
|
||||
|
@ -69,6 +65,8 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
# packet/connection from a local IP
|
||||
self.local = False
|
||||
self.pendingUpload = RandomTrackingDict()
|
||||
# canonical identifier of network group
|
||||
self.network_group = None
|
||||
|
||||
def bm_proto_reset(self):
|
||||
"""Reset the bitmessage object parser"""
|
||||
|
@ -96,7 +94,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
self.close_reason = "Bad magic"
|
||||
self.set_state("close")
|
||||
return False
|
||||
if self.payloadLength > BMProto.maxMessageSize:
|
||||
if self.payloadLength > MAX_MESSAGE_SIZE:
|
||||
self.invalid = True
|
||||
self.set_state(
|
||||
"bm_command",
|
||||
|
@ -339,7 +337,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
def _command_inv(self, dandelion=False):
|
||||
items = self.decode_payload_content("l32s")
|
||||
|
||||
if len(items) > BMProto.maxObjectCount:
|
||||
if len(items) > MAX_OBJECT_COUNT:
|
||||
logger.error(
|
||||
'Too many items in %sinv message!', 'd' if dandelion else '')
|
||||
raise BMProtoExcessiveDataError()
|
||||
|
@ -374,7 +372,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
nonce, expiresTime, objectType, version, streamNumber,
|
||||
self.payload, self.payloadOffset)
|
||||
|
||||
if len(self.payload) - self.payloadOffset > BMProto.maxObjectPayloadSize:
|
||||
if len(self.payload) - self.payloadOffset > MAX_OBJECT_PAYLOAD_SIZE:
|
||||
logger.info(
|
||||
'The payload length of this object is too large (%d bytes).'
|
||||
' Ignoring it.', len(self.payload) - self.payloadOffset)
|
||||
|
@ -438,10 +436,10 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
continue
|
||||
if (
|
||||
decodedIP and time.time() - seenTime > 0 and
|
||||
seenTime > time.time() - BMProto.addressAlive and
|
||||
seenTime > time.time() - ADDRESS_ALIVE and
|
||||
port > 0
|
||||
):
|
||||
peer = state.Peer(decodedIP, port)
|
||||
peer = Peer(decodedIP, port)
|
||||
try:
|
||||
if knownnodes.knownNodes[stream][peer]["lastseen"] > seenTime:
|
||||
continue
|
||||
|
@ -457,12 +455,15 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
"rating": 0,
|
||||
"self": False,
|
||||
}
|
||||
addrQueue.put((stream, peer, self.destination))
|
||||
# since we don't track peers outside of knownnodes,
|
||||
# only spread if in knownnodes to prevent flood
|
||||
addrQueue.put((stream, peer, seenTime,
|
||||
self.destination))
|
||||
return True
|
||||
|
||||
def bm_command_portcheck(self):
|
||||
"""Incoming port check request, queue it."""
|
||||
portCheckerQueue.put(state.Peer(self.destination, self.peerNode.port))
|
||||
portCheckerQueue.put(Peer(self.destination, self.peerNode.port))
|
||||
return True
|
||||
|
||||
def bm_command_ping(self):
|
||||
|
@ -508,7 +509,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
self.timeOffset = self.timestamp - int(time.time())
|
||||
logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion)
|
||||
logger.debug('services: 0x%08X', self.services)
|
||||
logger.debug('time offset: %i', self.timestamp - int(time.time()))
|
||||
logger.debug('time offset: %i', self.timeOffset)
|
||||
logger.debug('my external IP: %s', self.sockNode.host)
|
||||
logger.debug(
|
||||
'remote node incoming address: %s:%i',
|
||||
|
@ -548,7 +549,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
'Closing connection to old protocol version %s, node: %s',
|
||||
self.remoteProtocolVersion, self.destination)
|
||||
return False
|
||||
if self.timeOffset > BMProto.maxTimeOffset:
|
||||
if self.timeOffset > MAX_TIME_OFFSET:
|
||||
self.append_write_buf(protocol.assembleErrorMessage(
|
||||
errorText="Your time is too far in the future compared to mine."
|
||||
" Closing connection.", fatal=2))
|
||||
|
@ -557,7 +558,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
" Closing connection to it.", self.destination, self.timeOffset)
|
||||
BMProto.timeOffsetWrongCount += 1
|
||||
return False
|
||||
elif self.timeOffset < -BMProto.maxTimeOffset:
|
||||
elif self.timeOffset < -MAX_TIME_OFFSET:
|
||||
self.append_write_buf(protocol.assembleErrorMessage(
|
||||
errorText="Your time is too far in the past compared to mine."
|
||||
" Closing connection.", fatal=2))
|
||||
|
@ -592,12 +593,14 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
# incoming from a peer we're connected to as outbound,
|
||||
# or server full report the same error to counter deanonymisation
|
||||
if (
|
||||
state.Peer(self.destination.host, self.peerNode.port) in
|
||||
connectionpool.BMConnectionPool().inboundConnections or
|
||||
len(connectionpool.BMConnectionPool().inboundConnections) +
|
||||
len(connectionpool.BMConnectionPool().outboundConnections) >
|
||||
BMConfigParser().safeGetInt("bitmessagesettings", "maxtotalconnections") +
|
||||
BMConfigParser().safeGetInt("bitmessagesettings", "maxbootstrapconnections")
|
||||
Peer(self.destination.host, self.peerNode.port)
|
||||
in connectionpool.BMConnectionPool().inboundConnections
|
||||
or len(connectionpool.BMConnectionPool().inboundConnections)
|
||||
+ len(connectionpool.BMConnectionPool().outboundConnections)
|
||||
> BMConfigParser().safeGetInt(
|
||||
'bitmessagesettings', 'maxtotalconnections')
|
||||
+ BMConfigParser().safeGetInt(
|
||||
'bitmessagesettings', 'maxbootstrapconnections')
|
||||
):
|
||||
self.append_write_buf(protocol.assembleErrorMessage(
|
||||
errorText="Server full, please try again later.", fatal=2))
|
||||
|
@ -617,36 +620,10 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def assembleAddr(peerList):
|
||||
"""Build up a packed address"""
|
||||
if isinstance(peerList, state.Peer):
|
||||
peerList = (peerList)
|
||||
if not peerList:
|
||||
return b''
|
||||
retval = b''
|
||||
for i in range(0, len(peerList), BMProto.maxAddrCount):
|
||||
payload = addresses.encodeVarint(
|
||||
len(peerList[i:i + BMProto.maxAddrCount]))
|
||||
for address in peerList[i:i + BMProto.maxAddrCount]:
|
||||
stream, peer, timestamp = address
|
||||
payload += struct.pack(
|
||||
'>Q', timestamp) # 64-bit time
|
||||
payload += struct.pack('>I', stream)
|
||||
payload += struct.pack(
|
||||
'>q', 1) # service bit flags offered by this node
|
||||
payload += protocol.encodeHost(peer.host)
|
||||
payload += struct.pack('>H', peer.port) # remote port
|
||||
retval += protocol.CreatePacket('addr', payload)
|
||||
return retval
|
||||
|
||||
@staticmethod
|
||||
def stopDownloadingObject(hashId, forwardAnyway=False):
|
||||
"""Stop downloading an object"""
|
||||
for connection in (
|
||||
connectionpool.BMConnectionPool().inboundConnections.values() +
|
||||
connectionpool.BMConnectionPool().outboundConnections.values()
|
||||
):
|
||||
for connection in connectionpool.BMConnectionPool().connections():
|
||||
try:
|
||||
del connection.objectsNewToMe[hashId]
|
||||
except KeyError:
|
||||
|
@ -687,7 +664,7 @@ class BMStringParser(BMProto):
|
|||
"""
|
||||
def __init__(self):
|
||||
super(BMStringParser, self).__init__()
|
||||
self.destination = state.Peer('127.0.0.1', 8444)
|
||||
self.destination = Peer('127.0.0.1', 8444)
|
||||
self.payload = None
|
||||
ObjectTracker.__init__(self)
|
||||
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
# pylint: disable=too-many-branches
|
||||
import logging
|
||||
import random # nosec
|
||||
|
||||
import knownnodes
|
||||
import protocol
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from queues import Queue, portCheckerQueue
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
def getDiscoveredPeer():
|
||||
try:
|
||||
|
@ -24,8 +26,8 @@ def getDiscoveredPeer():
|
|||
def chooseConnection(stream):
|
||||
haveOnion = BMConfigParser().safeGet(
|
||||
"bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS'
|
||||
if state.trustedPeer:
|
||||
return state.trustedPeer
|
||||
onionOnly = BMConfigParser().safeGetBoolean(
|
||||
"bitmessagesettings", "onionservicesonly")
|
||||
try:
|
||||
retval = portCheckerQueue.get(False)
|
||||
portCheckerQueue.task_done()
|
||||
|
@ -47,6 +49,9 @@ def chooseConnection(stream):
|
|||
logger.warning('Error in %s', peer)
|
||||
rating = 0
|
||||
if haveOnion:
|
||||
# do not connect to raw IP addresses--keep all traffic within Tor overlay
|
||||
if onionOnly and not peer.host.endswith('.onion'):
|
||||
continue
|
||||
# onion addresses have a higher priority when SOCKS
|
||||
if peer.host.endswith('.onion') and rating > 0:
|
||||
rating = 1
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
"""
|
||||
src/network/connectionpool.py
|
||||
==================================
|
||||
`BMConnectionPool` class definition
|
||||
"""
|
||||
import errno
|
||||
import logging
|
||||
import re
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
import asyncore_pollchoose as asyncore
|
||||
|
@ -14,7 +15,7 @@ import protocol
|
|||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from connectionchooser import chooseConnection
|
||||
from debug import logger
|
||||
from node import Peer
|
||||
from proxy import Proxy
|
||||
from singleton import Singleton
|
||||
from tcp import (
|
||||
|
@ -22,11 +23,27 @@ from tcp import (
|
|||
TCPConnection, TCPServer)
|
||||
from udp import UDPSocket
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
@Singleton
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
class BMConnectionPool(object):
|
||||
"""Pool of all existing connections"""
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
|
||||
trustedPeer = None
"""
If the trustedpeer option is specified in keys.dat then this will
contain a Peer which will be connected to instead of using the
addresses advertised by other peers.

The expected use case is where the user has a trusted server where
they run a Bitmessage daemon permanently. If they then run a second
instance of the client on a local machine periodically when they want
to check for messages, it will sync with the network a lot faster
without compromising security.
"""
|
||||
|
||||
def __init__(self):
|
||||
asyncore.set_rates(
|
||||
BMConfigParser().safeGetInt(
|
||||
|
@ -39,9 +56,33 @@ class BMConnectionPool(object):
|
|||
self.listeningSockets = {}
|
||||
self.udpSockets = {}
|
||||
self.streams = []
|
||||
self.lastSpawned = 0
|
||||
self.spawnWait = 2
|
||||
self.bootstrapped = False
|
||||
self._lastSpawned = 0
|
||||
self._spawnWait = 2
|
||||
self._bootstrapped = False
|
||||
|
||||
trustedPeer = BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'trustedpeer')
|
||||
try:
|
||||
if trustedPeer:
|
||||
host, port = trustedPeer.split(':')
|
||||
self.trustedPeer = Peer(host, int(port))
|
||||
except ValueError:
|
||||
sys.exit(
|
||||
'Bad trustedpeer config setting! It should be set as'
|
||||
' trustedpeer=<hostname>:<portnumber>'
|
||||
)
|
||||
|
||||
def connections(self):
|
||||
"""
|
||||
Shortcut for combined list of connections from
|
||||
`inboundConnections` and `outboundConnections` dicts
|
||||
"""
|
||||
return self.inboundConnections.values() + self.outboundConnections.values()
|
||||
|
||||
def establishedConnections(self):
|
||||
"""Shortcut for list of connections having fullyEstablished == True"""
|
||||
return [
|
||||
x for x in self.connections() if x.fullyEstablished]
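These two helpers are what let the rest of the patch drop the repeated inboundConnections.values() + outboundConnections.values() concatenation. Illustrative use only:

from network.connectionpool import BMConnectionPool

pool = BMConnectionPool()
for connection in pool.connections():
    # every TCP/UDP connection, regardless of handshake state
    print(connection.destination)
for connection in pool.establishedConnections():
    # only peers that completed the version/verack handshake
    print(connection.destination)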
|
||||
|
||||
def connectToStream(self, streamNumber):
|
||||
"""Connect to a bitmessage stream"""
|
||||
|
@ -72,10 +113,7 @@ class BMConnectionPool(object):
|
|||
|
||||
def isAlreadyConnected(self, nodeid):
|
||||
"""Check if we're already connected to this peer"""
|
||||
for i in (
|
||||
self.inboundConnections.values() +
|
||||
self.outboundConnections.values()
|
||||
):
|
||||
for i in self.connections():
|
||||
try:
|
||||
if nodeid == i.nodeid:
|
||||
return True
|
||||
|
@ -101,7 +139,7 @@ class BMConnectionPool(object):
|
|||
if isinstance(connection, UDPSocket):
|
||||
del self.udpSockets[connection.listening.host]
|
||||
elif isinstance(connection, TCPServer):
|
||||
del self.listeningSockets[state.Peer(
|
||||
del self.listeningSockets[Peer(
|
||||
connection.destination.host, connection.destination.port)]
|
||||
elif connection.isOutbound:
|
||||
try:
|
||||
|
@ -127,10 +165,11 @@ class BMConnectionPool(object):
|
|||
"bitmessagesettings", "onionbindip")
|
||||
else:
|
||||
host = '127.0.0.1'
|
||||
if (BMConfigParser().safeGetBoolean(
|
||||
"bitmessagesettings", "sockslisten") or
|
||||
BMConfigParser().safeGet(
|
||||
"bitmessagesettings", "socksproxytype") == "none"):
|
||||
if (
|
||||
BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten")
|
||||
or BMConfigParser().safeGet("bitmessagesettings", "socksproxytype")
|
||||
== "none"
|
||||
):
|
||||
# python doesn't like bind + INADDR_ANY?
|
||||
# host = socket.INADDR_ANY
|
||||
host = BMConfigParser().get("network", "bind")
|
||||
|
@ -190,6 +229,7 @@ class BMConnectionPool(object):
|
|||
|
||||
def loop(self): # pylint: disable=too-many-branches,too-many-statements
|
||||
"""Main Connectionpool's loop"""
|
||||
# pylint: disable=too-many-locals
|
||||
# defaults to empty loop if outbound connections are maxed
|
||||
spawnConnections = False
|
||||
acceptConnections = True
|
||||
|
@ -203,11 +243,13 @@ class BMConnectionPool(object):
|
|||
'bitmessagesettings', 'socksproxytype', '')
|
||||
onionsocksproxytype = BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'onionsocksproxytype', '')
|
||||
if (socksproxytype[:5] == 'SOCKS' and
|
||||
not BMConfigParser().safeGetBoolean(
|
||||
'bitmessagesettings', 'sockslisten') and
|
||||
'.onion' not in BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'onionhostname', '')):
|
||||
if (
|
||||
socksproxytype[:5] == 'SOCKS'
|
||||
and not BMConfigParser().safeGetBoolean(
|
||||
'bitmessagesettings', 'sockslisten')
|
||||
and '.onion' not in BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'onionhostname', '')
|
||||
):
|
||||
acceptConnections = False
|
||||
|
||||
# pylint: disable=too-many-nested-blocks
|
||||
|
@ -215,8 +257,8 @@ class BMConnectionPool(object):
|
|||
if not knownnodes.knownNodesActual:
|
||||
self.startBootstrappers()
|
||||
knownnodes.knownNodesActual = True
|
||||
if not self.bootstrapped:
|
||||
self.bootstrapped = True
|
||||
if not self._bootstrapped:
|
||||
self._bootstrapped = True
|
||||
Proxy.proxy = (
|
||||
BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'sockshostname'),
|
||||
|
@ -245,7 +287,7 @@ class BMConnectionPool(object):
|
|||
for i in range(
|
||||
state.maximumNumberOfHalfOpenConnections - pending):
|
||||
try:
|
||||
chosen = chooseConnection(
|
||||
chosen = self.trustedPeer or chooseConnection(
|
||||
helper_random.randomchoice(self.streams))
|
||||
except ValueError:
|
||||
continue
|
||||
|
@ -256,10 +298,22 @@ class BMConnectionPool(object):
|
|||
# don't connect to self
|
||||
if chosen in state.ownAddresses:
|
||||
continue
|
||||
# don't connect to the hosts from the same
# network group, defense against Sybil attacks
|
||||
host_network_group = protocol.network_group(
|
||||
chosen.host)
|
||||
same_group = False
|
||||
for j in self.outboundConnections.values():
|
||||
if host_network_group == j.network_group:
|
||||
same_group = True
|
||||
if chosen.host == j.destination.host:
|
||||
knownnodes.decreaseRating(chosen)
|
||||
break
|
||||
if same_group:
|
||||
continue
|
||||
|
||||
try:
|
||||
if (chosen.host.endswith(".onion") and
|
||||
Proxy.onion_proxy is not None):
|
||||
if chosen.host.endswith(".onion") and Proxy.onion_proxy:
|
||||
if onionsocksproxytype == "SOCKS5":
|
||||
self.addConnection(Socks5BMConnection(chosen))
|
||||
elif onionsocksproxytype == "SOCKS4a":
|
||||
|
@ -274,12 +328,9 @@ class BMConnectionPool(object):
|
|||
if e.errno == errno.ENETUNREACH:
|
||||
continue
|
||||
|
||||
self.lastSpawned = time.time()
|
||||
self._lastSpawned = time.time()
|
||||
else:
|
||||
for i in (
|
||||
self.inboundConnections.values() +
|
||||
self.outboundConnections.values()
|
||||
):
|
||||
for i in self.connections():
|
||||
# FIXME: rating will be increased after next connection
|
||||
i.handle_close()
|
||||
|
||||
|
@ -289,8 +340,8 @@ class BMConnectionPool(object):
|
|||
self.startListening()
|
||||
else:
|
||||
for bind in re.sub(
|
||||
'[^\w.]+', ' ', # pylint: disable=anomalous-backslash-in-string
|
||||
BMConfigParser().safeGet('network', 'bind')
|
||||
r'[^\w.]+', ' ',
|
||||
BMConfigParser().safeGet('network', 'bind')
|
||||
).split():
|
||||
self.startListening(bind)
|
||||
logger.info('Listening for incoming connections.')
|
||||
|
@ -299,8 +350,8 @@ class BMConnectionPool(object):
|
|||
self.startUDPSocket()
|
||||
else:
|
||||
for bind in re.sub(
|
||||
'[^\w.]+', ' ', # pylint: disable=anomalous-backslash-in-string
|
||||
BMConfigParser().safeGet('network', 'bind')
|
||||
r'[^\w.]+', ' ',
|
||||
BMConfigParser().safeGet('network', 'bind')
|
||||
).split():
|
||||
self.startUDPSocket(bind)
|
||||
self.startUDPSocket(False)
|
||||
|
@ -317,16 +368,13 @@ class BMConnectionPool(object):
|
|||
i.accepting = i.connecting = i.connected = False
|
||||
logger.info('Stopped udp sockets.')
|
||||
|
||||
loopTime = float(self.spawnWait)
|
||||
if self.lastSpawned < time.time() - self.spawnWait:
|
||||
loopTime = float(self._spawnWait)
|
||||
if self._lastSpawned < time.time() - self._spawnWait:
|
||||
loopTime = 2.0
|
||||
asyncore.loop(timeout=loopTime, count=1000)
|
||||
|
||||
reaper = []
|
||||
for i in (
|
||||
self.inboundConnections.values() +
|
||||
self.outboundConnections.values()
|
||||
):
|
||||
for i in self.connections():
|
||||
minTx = time.time() - 20
|
||||
if i.fullyEstablished:
|
||||
minTx -= 300 - 20
|
||||
|
@ -338,10 +386,8 @@ class BMConnectionPool(object):
|
|||
time.time() - i.lastTx)
|
||||
i.set_state("close")
|
||||
for i in (
|
||||
self.inboundConnections.values() +
|
||||
self.outboundConnections.values() +
|
||||
self.listeningSockets.values() +
|
||||
self.udpSockets.values()
|
||||
self.connections()
|
||||
+ self.listeningSockets.values() + self.udpSockets.values()
|
||||
):
|
||||
if not (i.accepting or i.connecting or i.connected):
|
||||
reaper.append(i)
|
||||
|
|
11 src/network/constants.py Normal file
@ -0,0 +1,11 @@
"""
Network protocol constants
"""


ADDRESS_ALIVE = 10800  #: address is online if online less than this many seconds ago
MAX_ADDR_COUNT = 1000  #: protocol specification says max 1000 addresses in one addr command
MAX_MESSAGE_SIZE = 1600100  #: ~1.6 MB which is the maximum possible size of an inv message.
MAX_OBJECT_PAYLOAD_SIZE = 2**18  #: 2**18 = 256kB is the maximum size of an object payload
MAX_OBJECT_COUNT = 50000  #: protocol specification says max 50000 objects in one inv command
MAX_TIME_OFFSET = 3600  #: maximum time offset
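These module-level constants replace the old per-class attributes on BMProto. A quick sketch of a call site; the helper itself is hypothetical and only mirrors the checks BMProto performs on incoming addr and inv commands:

from network.constants import MAX_ADDR_COUNT, MAX_OBJECT_COUNT

def sane_counts(addr_count, inv_count):
    # Hypothetical guard, not part of the patch.
    return addr_count <= MAX_ADDR_COUNT and inv_count <= MAX_OBJECT_COUNT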
|
|
@ -2,6 +2,7 @@
|
|||
src/network/dandelion.py
|
||||
========================
|
||||
"""
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from random import choice, sample, expovariate
|
||||
from threading import RLock
|
||||
|
@ -9,7 +10,6 @@ from time import time
|
|||
|
||||
import connectionpool
|
||||
import state
|
||||
from debug import logging
|
||||
from queues import invQueue
|
||||
from singleton import Singleton
|
||||
|
||||
|
@ -24,6 +24,8 @@ MAX_STEMS = 2
|
|||
|
||||
Stem = namedtuple('Stem', ['child', 'stream', 'timeout'])
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
@Singleton
|
||||
class Dandelion(): # pylint: disable=old-style-class
|
||||
|
@ -72,9 +74,10 @@ class Dandelion(): # pylint: disable=old-style-class
|
|||
|
||||
def removeHash(self, hashId, reason="no reason specified"):
|
||||
"""Switch inventory vector from stem to fluff mode"""
|
||||
logging.debug(
|
||||
"%s entering fluff mode due to %s.",
|
||||
''.join('%02x' % ord(i) for i in hashId), reason)
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
'%s entering fluff mode due to %s.',
|
||||
''.join('%02x' % ord(i) for i in hashId), reason)
|
||||
with self.lock:
|
||||
try:
|
||||
del self.hashMap[hashId]
|
||||
|
|
|
@ -1,18 +1,17 @@
|
|||
"""
|
||||
src/network/downloadthread.py
|
||||
=============================
|
||||
`DownloadThread` class definition
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import addresses
|
||||
import helper_random
|
||||
import protocol
|
||||
from dandelion import Dandelion
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from inventory import Inventory
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from objectracker import missingObjects
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class DownloadThread(StoppableThread):
|
||||
|
@ -25,12 +24,11 @@ class DownloadThread(StoppableThread):
|
|||
|
||||
def __init__(self):
|
||||
super(DownloadThread, self).__init__(name="Downloader")
|
||||
logger.info("init download thread")
|
||||
self.lastCleaned = time.time()
|
||||
|
||||
def cleanPending(self):
|
||||
"""Expire pending downloads eventually"""
|
||||
deadline = time.time() - DownloadThread.requestExpires
|
||||
deadline = time.time() - self.requestExpires
|
||||
try:
|
||||
toDelete = [k for k, v in missingObjects.iteritems() if v < deadline]
|
||||
except RuntimeError:
|
||||
|
@ -44,15 +42,12 @@ class DownloadThread(StoppableThread):
|
|||
while not self._stopped:
|
||||
requested = 0
|
||||
# Choose downloading peers randomly
|
||||
connections = [
|
||||
x for x in
|
||||
BMConnectionPool().inboundConnections.values() + BMConnectionPool().outboundConnections.values()
|
||||
if x.fullyEstablished]
|
||||
connections = BMConnectionPool().establishedConnections()
|
||||
helper_random.randomshuffle(connections)
|
||||
try:
|
||||
requestChunk = max(int(min(DownloadThread.maxRequestChunk, len(missingObjects)) / len(connections)), 1)
|
||||
except ZeroDivisionError:
|
||||
requestChunk = 1
|
||||
requestChunk = max(int(
|
||||
min(self.maxRequestChunk, len(missingObjects))
|
||||
/ len(connections)), 1) if connections else 1
|
||||
|
||||
for i in connections:
|
||||
now = time.time()
|
||||
# avoid unnecessary delay
|
||||
|
@ -78,9 +73,11 @@ class DownloadThread(StoppableThread):
|
|||
continue
|
||||
payload[0:0] = addresses.encodeVarint(chunkCount)
|
||||
i.append_write_buf(protocol.CreatePacket('getdata', payload))
|
||||
logger.debug("%s:%i Requesting %i objects", i.destination.host, i.destination.port, chunkCount)
|
||||
self.logger.debug(
|
||||
'%s:%i Requesting %i objects',
|
||||
i.destination.host, i.destination.port, chunkCount)
|
||||
requested += chunkCount
|
||||
if time.time() >= self.lastCleaned + DownloadThread.cleanInterval:
|
||||
if time.time() >= self.lastCleaned + self.cleanInterval:
|
||||
self.cleanPending()
|
||||
if not requested:
|
||||
self.stop.wait(1)
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
"""
|
||||
src/network/http-old.py
|
||||
=======================
|
||||
"""
|
||||
import asyncore
|
||||
import socket
|
||||
import time
|
|
@ -9,10 +9,10 @@ from time import time
|
|||
import addresses
|
||||
import protocol
|
||||
import state
|
||||
from helper_threading import StoppableThread
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.dandelion import Dandelion
|
||||
from queues import invQueue
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
def handleExpiredDandelion(expired):
|
||||
|
@ -20,9 +20,7 @@ def handleExpiredDandelion(expired):
|
|||
the object"""
|
||||
if not expired:
|
||||
return
|
||||
for i in \
|
||||
BMConnectionPool().inboundConnections.values() + \
|
||||
BMConnectionPool().outboundConnections.values():
|
||||
for i in BMConnectionPool().connections():
|
||||
if not i.fullyEstablished:
|
||||
continue
|
||||
for x in expired:
|
||||
|
@ -44,9 +42,7 @@ class InvThread(StoppableThread):
|
|||
def handleLocallyGenerated(stream, hashId):
|
||||
"""Locally generated inventory items require special handling"""
|
||||
Dandelion().addHash(hashId, stream=stream)
|
||||
for connection in \
|
||||
BMConnectionPool().inboundConnections.values() + \
|
||||
BMConnectionPool().outboundConnections.values():
|
||||
for connection in BMConnectionPool().connections():
|
||||
if state.dandelion and connection != Dandelion().objectChildStem(hashId):
|
||||
continue
|
||||
connection.objectsNewToThem[hashId] = time()
|
||||
|
@ -67,8 +63,7 @@ class InvThread(StoppableThread):
|
|||
break
|
||||
|
||||
if chunk:
|
||||
for connection in BMConnectionPool().inboundConnections.values() + \
|
||||
BMConnectionPool().outboundConnections.values():
|
||||
for connection in BMConnectionPool().connections():
|
||||
fluffs = []
|
||||
stems = []
|
||||
for inv in chunk:
|
||||
|
@ -96,13 +91,13 @@ class InvThread(StoppableThread):
|
|||
if fluffs:
|
||||
random.shuffle(fluffs)
|
||||
connection.append_write_buf(protocol.CreatePacket(
|
||||
'inv', addresses.encodeVarint(len(fluffs)) +
|
||||
"".join(fluffs)))
|
||||
'inv',
|
||||
addresses.encodeVarint(len(fluffs)) + ''.join(fluffs)))
|
||||
if stems:
|
||||
random.shuffle(stems)
|
||||
connection.append_write_buf(protocol.CreatePacket(
|
||||
'dinv', addresses.encodeVarint(len(stems)) +
|
||||
"".join(stems)))
|
||||
'dinv',
|
||||
addresses.encodeVarint(len(stems)) + ''.join(stems)))
|
||||
|
||||
invQueue.iterate()
|
||||
for i in range(len(chunk)):
|
||||
|
|
|
@ -1,20 +1,13 @@
|
|||
"""
|
||||
src/network/networkthread.py
|
||||
============================
|
||||
"""
|
||||
import network.asyncore_pollchoose as asyncore
|
||||
import state
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from queues import excQueue
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class BMNetworkThread(StoppableThread):
|
||||
"""A thread to handle network concerns"""
|
||||
def __init__(self):
|
||||
super(BMNetworkThread, self).__init__(name="Asyncore")
|
||||
logger.info("init asyncore thread")
|
||||
name = "Asyncore"
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
|
|
|
@ -1,7 +1,7 @@
"""
src/network/node.py
===================
Named tuples representing the network peers
"""
import collections

Peer = collections.namedtuple('Peer', ['host', 'port'])
Node = collections.namedtuple('Node', ['services', 'host', 'port'])
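Because both are plain namedtuples, a Peer hashes by value, which is what lets it serve as a dictionary key in knownnodes and in the connection pool. Illustrative only, with made-up values:

from network.node import Node, Peer

peer = Peer('127.0.0.1', 8444)
node = Node(services=1, host=peer.host, port=peer.port)
last_seen = {peer: 1546300800}            # value-based hashing: equal peers collide
assert Peer('127.0.0.1', 8444) in last_seen
assert node.port == 8444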
|
||||
|
|
|
@ -95,8 +95,7 @@ class ObjectTracker(object):
|
|||
|
||||
def handleReceivedObject(self, streamNumber, hashid):
|
||||
"""Handling received object"""
|
||||
for i in network.connectionpool.BMConnectionPool().inboundConnections.values(
|
||||
) + network.connectionpool.BMConnectionPool().outboundConnections.values():
|
||||
for i in network.connectionpool.BMConnectionPool().connections():
|
||||
if not i.fullyEstablished:
|
||||
continue
|
||||
try:
|
||||
|
|
|
@ -3,14 +3,16 @@ src/network/proxy.py
|
|||
====================
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
import logging
|
||||
import socket
|
||||
import time
|
||||
|
||||
import asyncore_pollchoose as asyncore
|
||||
import state
|
||||
from advanceddispatcher import AdvancedDispatcher
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from node import Peer
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class ProxyError(Exception):
|
||||
|
@ -88,9 +90,10 @@ class Proxy(AdvancedDispatcher):
|
|||
def onion_proxy(self, address):
|
||||
"""Set onion proxy address"""
|
||||
if address is not None and (
|
||||
not isinstance(address, tuple) or len(address) < 2 or
|
||||
not isinstance(address[0], str) or
|
||||
not isinstance(address[1], int)):
|
||||
not isinstance(address, tuple) or len(address) < 2
|
||||
or not isinstance(address[0], str)
|
||||
or not isinstance(address[1], int)
|
||||
):
|
||||
raise ValueError
|
||||
self.__class__._onion_proxy = address
|
||||
|
||||
|
@ -105,7 +108,7 @@ class Proxy(AdvancedDispatcher):
|
|||
self.__class__._onion_auth = authTuple
|
||||
|
||||
def __init__(self, address):
|
||||
if not isinstance(address, state.Peer):
|
||||
if not isinstance(address, Peer):
|
||||
raise ValueError
|
||||
AdvancedDispatcher.__init__(self)
|
||||
self.destination = address
|
||||
|
@ -144,5 +147,6 @@ class Proxy(AdvancedDispatcher):
|
|||
|
||||
def state_proxy_handshake_done(self):
|
||||
"""Handshake is complete at this point"""
|
||||
self.connectedAt = time.time() # pylint: disable=attribute-defined-outside-init
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.connectedAt = time.time()
|
||||
return False
|
||||
|
|
|
@ -2,18 +2,16 @@ import errno
|
|||
import Queue
|
||||
import socket
|
||||
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
import state
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.advanceddispatcher import UnknownStateError
|
||||
from queues import receiveDataQueue
|
||||
import state
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class ReceiveQueueThread(StoppableThread):
|
||||
def __init__(self, num=0):
|
||||
super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num)
|
||||
logger.info("init receive queue thread %i", num)
|
||||
|
||||
def run(self):
|
||||
while not self._stopped and state.shutdown == 0:
|
||||
|
@ -26,27 +24,26 @@ class ReceiveQueueThread(StoppableThread):
|
|||
break
|
||||
|
||||
# cycle as long as there is data
|
||||
# methods should return False if there isn't enough data, or the connection is to be aborted
|
||||
|
||||
# state_* methods should return False if there isn't enough data,
|
||||
# methods should return False if there isn't enough data,
|
||||
# or the connection is to be aborted
|
||||
|
||||
# state_* methods should return False if there isn't
|
||||
# enough data, or the connection is to be aborted
|
||||
|
||||
try:
|
||||
connection = BMConnectionPool().getConnectionByAddr(dest)
|
||||
# KeyError = connection object not found
|
||||
except KeyError:
|
||||
except KeyError: # connection object not found
|
||||
receiveDataQueue.task_done()
|
||||
continue
|
||||
try:
|
||||
connection.process()
|
||||
# UnknownStateError = state isn't implemented
|
||||
except (UnknownStateError):
|
||||
except UnknownStateError: # state isn't implemented
|
||||
pass
|
||||
except socket.error as err:
|
||||
if err.errno == errno.EBADF:
|
||||
connection.set_state("close", 0)
|
||||
else:
|
||||
logger.error("Socket error: %s", str(err))
|
||||
self.logger.error('Socket error: %s', err)
|
||||
except:
|
||||
logger.error("Error processing", exc_info=True)
|
||||
self.logger.error('Error processing', exc_info=True)
|
||||
receiveDataQueue.task_done()
|
||||
|
|
|
@ -8,7 +8,7 @@ src/network/socks5.py
|
|||
import socket
|
||||
import struct
|
||||
|
||||
import state
|
||||
from node import Peer
|
||||
from proxy import GeneralProxyError, Proxy, ProxyError
|
||||
|
||||
|
||||
|
@ -200,7 +200,7 @@ class Socks5Resolver(Socks5):
|
|||
def __init__(self, host):
|
||||
self.host = host
|
||||
self.port = 8444
|
||||
Socks5.__init__(self, address=state.Peer(self.host, self.port))
|
||||
Socks5.__init__(self, address=Peer(self.host, self.port))
|
||||
|
||||
def state_auth_done(self):
|
||||
"""Perform resolving"""
|
||||
|
|
|
@ -19,16 +19,7 @@ currentSentSpeed = 0
|
|||
|
||||
def connectedHostsList():
|
||||
"""List of all the connected hosts"""
|
||||
retval = []
|
||||
for i in BMConnectionPool().inboundConnections.values() + \
|
||||
BMConnectionPool().outboundConnections.values():
|
||||
if not i.fullyEstablished:
|
||||
continue
|
||||
try:
|
||||
retval.append(i)
|
||||
except AttributeError:
|
||||
pass
|
||||
return retval
|
||||
return BMConnectionPool().establishedConnections()
|
||||
|
||||
|
||||
def sentBytes():
|
||||
|
@ -71,12 +62,6 @@ def downloadSpeed():
|
|||
def pendingDownload():
|
||||
"""Getting pending downloads"""
|
||||
return len(missingObjects)
|
||||
# tmp = {}
|
||||
# for connection in BMConnectionPool().inboundConnections.values() + \
|
||||
# BMConnectionPool().outboundConnections.values():
|
||||
# for k in connection.objectsNewToMe.keys():
|
||||
# tmp[k] = True
|
||||
# return len(tmp)
|
||||
|
||||
|
||||
def pendingUpload():
|
||||
|
|
|
@ -4,6 +4,7 @@ src/network/tcp.py
|
|||
==================
|
||||
"""
|
||||
|
||||
import logging
|
||||
import math
|
||||
import random
|
||||
import socket
|
||||
|
@ -18,18 +19,22 @@ import protocol
|
|||
import shared
|
||||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_random import randomBytes
|
||||
from inventory import Inventory
|
||||
from network.advanceddispatcher import AdvancedDispatcher
|
||||
from network.assemble import assemble_addr
|
||||
from network.bmproto import BMProto
|
||||
from network.constants import MAX_OBJECT_COUNT
|
||||
from network.dandelion import Dandelion
|
||||
from network.objectracker import ObjectTracker
|
||||
from network.socks4a import Socks4aConnection
|
||||
from network.socks5 import Socks5Connection
|
||||
from network.tls import TLSDispatcher
|
||||
from node import Peer
|
||||
from queues import UISignalQueue, invQueue, receiveDataQueue
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
class TCPConnection(BMProto, TLSDispatcher):
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
|
@ -47,7 +52,7 @@ class TCPConnection(BMProto, TLSDispatcher):
|
|||
self.connectedAt = 0
|
||||
self.skipUntil = 0
|
||||
if address is None and sock is not None:
|
||||
self.destination = state.Peer(*sock.getpeername())
|
||||
self.destination = Peer(*sock.getpeername())
|
||||
self.isOutbound = False
|
||||
TLSDispatcher.__init__(self, sock, server_side=True)
|
||||
self.connectedAt = time.time()
|
||||
|
@ -81,6 +86,7 @@ class TCPConnection(BMProto, TLSDispatcher):
|
|||
)
|
||||
except socket.error:
|
||||
pass # it's probably a hostname
|
||||
self.network_group = protocol.network_group(self.destination.host)
|
||||
ObjectTracker.__init__(self) # pylint: disable=non-parent-init-called
|
||||
self.bm_proto_reset()
|
||||
self.set_state("bm_header", expectBytes=protocol.Header.size)
|
||||
|
@ -179,7 +185,7 @@ class TCPConnection(BMProto, TLSDispatcher):
|
|||
for peer, params in addrs[substream]:
|
||||
templist.append((substream, peer, params["lastseen"]))
|
||||
if templist:
|
||||
self.append_write_buf(BMProto.assembleAddr(templist))
|
||||
self.append_write_buf(assemble_addr(templist))
|
||||
|
||||
def sendBigInv(self):
|
||||
"""
|
||||
|
@ -218,7 +224,7 @@ class TCPConnection(BMProto, TLSDispatcher):
|
|||
# Remove -1 below when sufficient time has passed for users to
|
||||
# upgrade to versions of PyBitmessage that accept inv with 50,000
|
||||
# items
|
||||
if objectCount >= BMProto.maxObjectCount - 1:
|
||||
if objectCount >= MAX_OBJECT_COUNT - 1:
|
||||
sendChunk()
|
||||
payload = b''
|
||||
objectCount = 0
|
||||
|
@ -332,7 +338,7 @@ def bootstrap(connection_class):
|
|||
_connection_base = connection_class
|
||||
|
||||
def __init__(self, host, port):
|
||||
self._connection_base.__init__(self, state.Peer(host, port))
|
||||
self._connection_base.__init__(self, Peer(host, port))
|
||||
self.close_reason = self._succeed = False
|
||||
|
||||
def bm_command_addr(self):
|
||||
|
@ -369,6 +375,7 @@ class TCPServer(AdvancedDispatcher):
|
|||
for attempt in range(50):
|
||||
try:
|
||||
if attempt > 0:
|
||||
logger.warning('Failed to bind on port %s', port)
|
||||
port = random.randint(32767, 65535)
|
||||
self.bind((host, port))
|
||||
except socket.error as e:
|
||||
|
@ -376,11 +383,12 @@ class TCPServer(AdvancedDispatcher):
|
|||
continue
|
||||
else:
|
||||
if attempt > 0:
|
||||
logger.warning('Setting port to %s', port)
|
||||
BMConfigParser().set(
|
||||
'bitmessagesettings', 'port', str(port))
|
||||
BMConfigParser().save()
|
||||
break
|
||||
self.destination = state.Peer(host, port)
|
||||
self.destination = Peer(host, port)
|
||||
self.bound = True
|
||||
self.listen(5)
|
||||
|
||||
|
@ -398,7 +406,7 @@ class TCPServer(AdvancedDispatcher):
|
|||
except (TypeError, IndexError):
|
||||
return
|
||||
|
||||
state.ownAddresses[state.Peer(*sock.getsockname())] = True
|
||||
state.ownAddresses[Peer(*sock.getsockname())] = True
|
||||
if (
|
||||
len(connectionpool.BMConnectionPool().inboundConnections) +
|
||||
len(connectionpool.BMConnectionPool().outboundConnections) >
|
||||
|
|
49 src/network/threads.py Normal file
@ -0,0 +1,49 @@
"""Threading primitives for the network package"""

import logging
import random
import threading
from contextlib import contextmanager


class StoppableThread(threading.Thread):
    """Base class for application threads with stopThread method"""
    name = None
    logger = logging.getLogger('default')

    def __init__(self, name=None):
        if name:
            self.name = name
        super(StoppableThread, self).__init__(name=self.name)
        self.stop = threading.Event()
        self._stopped = False
        random.seed()
        self.logger.info('Init thread %s', self.name)

    def stopThread(self):
        """Stop the thread"""
        self._stopped = True
        self.stop.set()


class BusyError(threading.ThreadError):
    """
    Thread error raised when another connection holds the lock
    we are trying to acquire.
    """
    pass


@contextmanager
def nonBlocking(lock):
    """
    A context manager which acquires given lock non-blocking
    and raises BusyError if failed to acquire.
    """
    locked = lock.acquire(False)
    if not locked:
        raise BusyError
    try:
        yield
    finally:
        lock.release()
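A short usage sketch; the thread subclass and the lock are hypothetical, only the primitives above come from the patch:

from threading import RLock
from network.threads import BusyError, StoppableThread, nonBlocking

class HeartbeatThread(StoppableThread):
    # Hypothetical subclass: stopThread() flips _stopped and wakes the wait.
    name = 'Heartbeat'

    def run(self):
        while not self._stopped:
            self.logger.debug('still alive')
            self.stop.wait(10)

lock = RLock()
try:
    with nonBlocking(lock):
        pass  # critical section; BusyError is raised if the lock is already held
except BusyError:
    pass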
|
|
@ -2,17 +2,18 @@
|
|||
SSL/TLS negotiation.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import ssl
|
||||
import sys
|
||||
|
||||
from debug import logger
|
||||
from network.advanceddispatcher import AdvancedDispatcher
|
||||
import network.asyncore_pollchoose as asyncore
|
||||
from queues import receiveDataQueue
|
||||
import paths
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
_DISCONNECTED_SSL = frozenset((ssl.SSL_ERROR_EOF,))
|
||||
|
||||
|
@ -38,12 +39,13 @@ else:
|
|||
sslProtocolCiphers = "AECDH-AES256-SHA"
|
||||
|
||||
|
||||
class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instance-attributes
|
||||
class TLSDispatcher(AdvancedDispatcher):
|
||||
"""TLS functionality for classes derived from AdvancedDispatcher"""
|
||||
# pylint: disable=too-many-arguments, super-init-not-called, unused-argument
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
# pylint: disable=too-many-arguments,super-init-not-called,unused-argument
|
||||
def __init__(
|
||||
self, address=None, sock=None, certfile=None, keyfile=None,
|
||||
server_side=False, ciphers=sslProtocolCiphers
|
||||
self, address=None, sock=None, certfile=None, keyfile=None,
|
||||
server_side=False, ciphers=sslProtocolCiphers
|
||||
):
|
||||
self.want_read = self.want_write = True
|
||||
if certfile is None:
|
||||
|
@ -95,7 +97,10 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc
|
|||
|
||||
@staticmethod
|
||||
def state_tls_handshake():
|
||||
"""Do nothing while TLS handshake is pending, as during this phase we need to react to callbacks instead"""
|
||||
"""
|
||||
Do nothing while TLS handshake is pending, as during this phase
|
||||
we need to react to callbacks instead
|
||||
"""
|
||||
return False
|
||||
|
||||
def writable(self):
|
||||
|
@ -121,10 +126,11 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc
|
|||
except AttributeError:
|
||||
return AdvancedDispatcher.readable(self)
|
||||
|
||||
def handle_read(self): # pylint: disable=inconsistent-return-statements
|
||||
def handle_read(self): # pylint: disable=inconsistent-return-statements
|
||||
"""
|
||||
Handle reads for sockets during TLS handshake. Requires special treatment as during the handshake, buffers must
|
||||
remain empty and normal reads must be ignored
|
||||
Handle reads for sockets during TLS handshake. Requires special
|
||||
treatment as during the handshake, buffers must remain empty
|
||||
and normal reads must be ignored.
|
||||
"""
|
||||
try:
|
||||
# wait for write buffer flush
|
||||
|
@ -146,10 +152,11 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc
|
|||
self.handle_close()
|
||||
return
|
||||
|
||||
def handle_write(self): # pylint: disable=inconsistent-return-statements
|
||||
def handle_write(self): # pylint: disable=inconsistent-return-statements
|
||||
"""
|
||||
Handle writes for sockets during TLS handshake. Requires special treatment as during the handshake, buffers
|
||||
must remain empty and normal writes must be ignored
|
||||
Handle writes for sockets during TLS handshake. Requires special
|
||||
treatment as during the handshake, buffers must remain empty
|
||||
and normal writes must be ignored.
|
||||
"""
|
||||
try:
|
||||
# wait for write buffer flush
|
||||
|
@ -192,18 +199,23 @@ class TLSDispatcher(AdvancedDispatcher): # pylint: disable=too-many-instanc
|
|||
if not (self.want_write or self.want_read):
|
||||
raise
|
||||
except socket.error as err:
|
||||
if err.errno in asyncore._DISCONNECTED: # pylint: disable=protected-access
|
||||
# pylint: disable=protected-access
|
||||
if err.errno in asyncore._DISCONNECTED:
|
||||
self.handle_close()
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
if sys.version_info >= (2, 7, 9):
|
||||
self.tlsVersion = self.sslSocket.version()
|
||||
logger.debug("%s:%i: TLS handshake success, TLS protocol version: %s",
|
||||
self.destination.host, self.destination.port, self.sslSocket.version())
|
||||
logger.debug(
|
||||
'%s:%i: TLS handshake success, TLS protocol version: %s',
|
||||
self.destination.host, self.destination.port,
|
||||
self.tlsVersion)
|
||||
else:
|
||||
self.tlsVersion = "TLSv1"
|
||||
logger.debug("%s:%i: TLS handshake success", self.destination.host, self.destination.port)
|
||||
logger.debug(
|
||||
'%s:%i: TLS handshake success',
|
||||
self.destination.host, self.destination.port)
|
||||
# The handshake has completed, so remove this channel and...
|
||||
self.del_channel()
|
||||
self.set_socket(self.sslSocket)
|
||||
|
|
|
@ -2,24 +2,28 @@
|
|||
src/network/udp.py
|
||||
==================
|
||||
"""
|
||||
import logging
|
||||
import time
|
||||
import socket
|
||||
|
||||
import state
|
||||
import protocol
|
||||
from bmproto import BMProto
|
||||
from debug import logger
|
||||
from node import Peer
|
||||
from objectracker import ObjectTracker
|
||||
from queues import receiveDataQueue
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes
|
||||
|
||||
class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes
|
||||
"""Bitmessage protocol over UDP (class)"""
|
||||
port = 8444
|
||||
announceInterval = 60
|
||||
|
||||
def __init__(self, host=None, sock=None, announcing=False):
|
||||
super(BMProto, self).__init__(sock=sock) # pylint: disable=bad-super-call
|
||||
# pylint: disable=bad-super-call
|
||||
super(BMProto, self).__init__(sock=sock)
|
||||
self.verackReceived = True
|
||||
self.verackSent = True
|
||||
# .. todo:: sort out streams
|
||||
|
@ -40,8 +44,8 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attribut
|
|||
else:
|
||||
self.socket = sock
|
||||
self.set_socket_reuse()
|
||||
self.listening = state.Peer(*self.socket.getsockname())
|
||||
self.destination = state.Peer(*self.socket.getsockname())
|
||||
self.listening = Peer(*self.socket.getsockname())
|
||||
self.destination = Peer(*self.socket.getsockname())
|
||||
ObjectTracker.__init__(self)
|
||||
self.connecting = False
|
||||
self.connected = True
|
||||
|
@ -79,7 +83,8 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attribut
|
|||
decodedIP = protocol.checkIPAddress(str(ip))
|
||||
if stream not in state.streamsInWhichIAmParticipating:
|
||||
continue
|
||||
if (seenTime < time.time() - self.maxTimeOffset or seenTime > time.time() + self.maxTimeOffset):
|
||||
if (seenTime < time.time() - self.maxTimeOffset
|
||||
or seenTime > time.time() + self.maxTimeOffset):
|
||||
continue
|
||||
if decodedIP is False:
|
||||
# if the address isn't local, interpret it as
|
||||
|
@ -92,7 +97,7 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attribut
|
|||
self.destination.host, self.destination.port, remoteport)
|
||||
if self.local:
|
||||
state.discoveredPeers[
|
||||
state.Peer(self.destination.host, remoteport)
|
||||
Peer(self.destination.host, remoteport)
|
||||
] = time.time()
|
||||
return True
|
||||
|
||||
|
@ -127,7 +132,7 @@ class UDPSocket(BMProto): # pylint: disable=too-many-instance-attribut
|
|||
logger.error("socket error: %s", e)
|
||||
return
|
||||
|
||||
self.destination = state.Peer(*addr)
|
||||
self.destination = Peer(*addr)
|
||||
encodedAddr = protocol.encodeHost(addr[0])
|
||||
self.local = bool(protocol.checkIPAddress(encodedAddr, True))
|
||||
# overwrite the old buffer to avoid mixing data and so that
|
||||
|
|
|
@ -1,43 +1,40 @@
|
|||
"""
|
||||
src/network/uploadthread.py
|
||||
`UploadThread` class definition
|
||||
"""
|
||||
# pylint: disable=unsubscriptable-object
|
||||
import time
|
||||
|
||||
import helper_random
|
||||
import protocol
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from inventory import Inventory
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.dandelion import Dandelion
|
||||
from randomtrackingdict import RandomTrackingDict
|
||||
from threads import StoppableThread
|
||||
|
||||
|
||||
class UploadThread(StoppableThread):
|
||||
"""This is a thread that uploads the objects that the peers requested from me """
|
||||
"""
|
||||
This is a thread that uploads the objects that the peers requested from me
|
||||
"""
|
||||
maxBufSize = 2097152 # 2MB
|
||||
|
||||
def __init__(self):
|
||||
super(UploadThread, self).__init__(name="Uploader")
|
||||
logger.info("init upload thread")
|
||||
name = "Uploader"
|
||||
|
||||
def run(self):
|
||||
while not self._stopped:
|
||||
uploaded = 0
|
||||
# Choose downloading peers randomly
|
||||
connections = [x for x in BMConnectionPool().inboundConnections.values() +
|
||||
BMConnectionPool().outboundConnections.values() if x.fullyEstablished]
|
||||
# Choose uploading peers randomly
|
||||
connections = BMConnectionPool().establishedConnections()
|
||||
helper_random.randomshuffle(connections)
|
||||
for i in connections:
|
||||
now = time.time()
|
||||
# avoid unnecessary delay
|
||||
if i.skipUntil >= now:
|
||||
continue
|
||||
if len(i.write_buf) > UploadThread.maxBufSize:
|
||||
if len(i.write_buf) > self.maxBufSize:
|
||||
continue
|
||||
try:
|
||||
request = i.pendingUpload.randomKeys(RandomTrackingDict.maxPending)
|
||||
request = i.pendingUpload.randomKeys(
|
||||
RandomTrackingDict.maxPending)
|
||||
except KeyError:
|
||||
continue
|
||||
payload = bytearray()
|
||||
|
@ -47,22 +44,26 @@ class UploadThread(StoppableThread):
|
|||
if Dandelion().hasHash(chunk) and \
|
||||
i != Dandelion().objectChildStem(chunk):
|
||||
i.antiIntersectionDelay()
|
||||
logger.info('%s asked for a stem object we didn\'t offer to it.',
|
||||
i.destination)
|
||||
self.logger.info(
|
||||
'%s asked for a stem object we didn\'t offer to it.',
|
||||
i.destination)
|
||||
break
|
||||
try:
|
||||
payload.extend(protocol.CreatePacket('object',
|
||||
Inventory()[chunk].payload))
|
||||
payload.extend(protocol.CreatePacket(
|
||||
'object', Inventory()[chunk].payload))
|
||||
chunk_count += 1
|
||||
except KeyError:
|
||||
i.antiIntersectionDelay()
|
||||
logger.info('%s asked for an object we don\'t have.', i.destination)
|
||||
self.logger.info(
|
||||
'%s asked for an object we don\'t have.',
|
||||
i.destination)
|
||||
break
|
||||
if not chunk_count:
|
||||
continue
|
||||
i.append_write_buf(payload)
|
||||
logger.debug("%s:%i Uploading %i objects",
|
||||
i.destination.host, i.destination.port, chunk_count)
|
||||
self.logger.debug(
|
||||
'%s:%i Uploading %i objects',
|
||||
i.destination.host, i.destination.port, chunk_count)
|
||||
uploaded += chunk_count
|
||||
if not uploaded:
|
||||
self.stop.wait(1)
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
#!/usr/bin/env python2.7
|
||||
"""
|
||||
Module for Proof of Work using OpenCL
|
||||
"""
|
||||
from struct import pack, unpack
|
||||
import time
|
||||
import hashlib
|
||||
import random
|
||||
import os
|
||||
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
@ -27,6 +28,8 @@ except ImportError:
|
|||
|
||||
|
||||
def initCL():
|
||||
"""Initlialise OpenCL engine"""
|
||||
# pylint: disable=global-statement
|
||||
global ctx, queue, program, hash_dt, libAvailable
|
||||
if libAvailable is False:
|
||||
return
|
||||
|
@ -40,12 +43,13 @@ def initCL():
|
|||
for platform in cl.get_platforms():
|
||||
gpus.extend(platform.get_devices(device_type=cl.device_type.GPU))
|
||||
if BMConfigParser().safeGet("bitmessagesettings", "opencl") == platform.vendor:
|
||||
enabledGpus.extend(platform.get_devices(device_type=cl.device_type.GPU))
|
||||
enabledGpus.extend(platform.get_devices(
|
||||
device_type=cl.device_type.GPU))
|
||||
if platform.vendor not in vendors:
|
||||
vendors.append(platform.vendor)
|
||||
except:
|
||||
pass
|
||||
if (len(enabledGpus) > 0):
|
||||
if enabledGpus:
|
||||
ctx = cl.Context(devices=enabledGpus)
|
||||
queue = cl.CommandQueue(ctx)
|
||||
f = open(os.path.join(paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r')
|
||||
|
@ -55,23 +59,29 @@ def initCL():
|
|||
else:
|
||||
logger.info("No OpenCL GPUs found")
|
||||
del enabledGpus[:]
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
logger.error("OpenCL fail: ", exc_info=True)
|
||||
del enabledGpus[:]
|
||||
|
||||
|
||||
def openclAvailable():
|
||||
return (len(gpus) > 0)
|
||||
"""Are there any OpenCL GPUs available?"""
|
||||
return bool(gpus)
|
||||
|
||||
|
||||
def openclEnabled():
|
||||
return (len(enabledGpus) > 0)
|
||||
"""Is OpenCL enabled (and available)?"""
|
||||
return bool(enabledGpus)
|
||||
|
||||
def do_opencl_pow(hash, target):
|
||||
|
||||
def do_opencl_pow(hash_, target):
|
||||
"""Perform PoW using OpenCL"""
|
||||
output = numpy.zeros(1, dtype=[('v', numpy.uint64, 1)])
|
||||
if (len(enabledGpus) == 0):
|
||||
if not enabledGpus:
|
||||
return output[0][0]
|
||||
|
||||
data = numpy.zeros(1, dtype=hash_dt, order='C')
|
||||
data[0]['v'] = ("0000000000000000" + hash).decode("hex")
|
||||
data[0]['v'] = ("0000000000000000" + hash_).decode("hex")
|
||||
data[0]['target'] = target
|
||||
|
||||
hash_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
|
||||
|
@ -83,9 +93,8 @@ def do_opencl_pow(hash, target):
|
|||
kernel.set_arg(0, hash_buf)
|
||||
kernel.set_arg(1, dest_buf)
|
||||
|
||||
start = time.time()
|
||||
progress = 0
|
||||
globamt = worksize*2000
|
||||
globamt = worksize * 2000
|
||||
|
||||
while output[0][0] == 0 and shutdown == 0:
|
||||
kernel.set_arg(2, pack("<Q", progress))
|
||||
|
@ -96,20 +105,18 @@ def do_opencl_pow(hash, target):
|
|||
cl.enqueue_copy(queue, output, dest_buf)
|
||||
queue.finish()
|
||||
progress += globamt
|
||||
sofar = time.time() - start
|
||||
# logger.debug("Working for %.3fs, %.2f Mh/s", sofar, (progress / sofar) / 1000000)
|
||||
if shutdown != 0:
|
||||
raise Exception ("Interrupted")
|
||||
taken = time.time() - start
|
||||
raise Exception("Interrupted")
|
||||
# logger.debug("Took %d tries.", progress)
|
||||
return output[0][0]
|
||||
|
||||
#initCL()
|
||||
|
||||
if __name__ == "__main__":
|
||||
target = 54227212183L
|
||||
initialHash = "3758f55b5a8d902fd3597e4ce6a2d3f23daff735f65d9698c270987f4e67ad590b93f3ffeba0ef2fd08a8dc2f87b68ae5a0dc819ab57f22ad2c4c9c8618a43b3".decode("hex")
|
||||
nonce = do_opencl_pow(initialHash.encode("hex"), target)
|
||||
trialValue, = unpack('>Q',hashlib.sha512(hashlib.sha512(pack('>Q',nonce) + initialHash).digest()).digest()[0:8])
|
||||
print "{} - value {} < {}".format(nonce, trialValue, target)
|
||||
|
||||
initCL()
|
||||
target_ = 54227212183
|
||||
initialHash = ("3758f55b5a8d902fd3597e4ce6a2d3f23daff735f65d9698c270987f4e67ad590"
|
||||
"b93f3ffeba0ef2fd08a8dc2f87b68ae5a0dc819ab57f22ad2c4c9c8618a43b3").decode("hex")
|
||||
nonce = do_opencl_pow(initialHash.encode("hex"), target_)
|
||||
trialValue, = unpack(
|
||||
'>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
|
||||
print "{} - value {} < {}".format(nonce, trialValue, target_)
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
"""
|
||||
src/paths.py
|
||||
============
|
||||
Path related functions
|
||||
"""
|
||||
# pylint: disable=import-error
|
||||
import logging
|
||||
|
@ -11,6 +10,7 @@ from datetime import datetime
|
|||
from shutil import move
|
||||
from kivy.utils import platform
|
||||
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
# When using py2exe or py2app, the variable frozen is added to the sys
|
||||
|
@ -46,7 +46,8 @@ def lookupAppdataFolder():
|
|||
dataFolder = os.path.join(
|
||||
os.environ['HOME'],
|
||||
'Library/Application Support/', APPNAME
|
||||
) + '/' # ..fixme:: should also be os.path.sep
|
||||
) + '/'
|
||||
|
||||
except KeyError:
|
||||
sys.exit(
|
||||
'Could not find home folder, please report this message'
|
||||
|
@ -83,6 +84,7 @@ def codePath():
|
|||
return os.path.dirname(__file__)
|
||||
return (
|
||||
os.environ.get('RESOURCEPATH')
|
||||
# pylint: disable=protected-access
|
||||
if frozen == "macosx_app" else sys._MEIPASS)
|
||||
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ class DebugLogger(object):
|
|||
"""Safe logger wrapper for tor and plugin's logs"""
|
||||
# pylint: disable=too-few-public-methods
|
||||
def __init__(self):
|
||||
self._logger = logging.getLogger(__name__.split('.', 1)[0])
|
||||
self._logger = logging.getLogger('default')
|
||||
self._levels = {
|
||||
'err': 40,
|
||||
'warn': 30,
|
||||
|
|
171 src/protocol.py
@ -1,12 +1,8 @@
|
|||
# pylint: disable=too-many-boolean-expressions,too-many-return-statements,too-many-locals,too-many-statements
|
||||
"""
|
||||
protocol.py
|
||||
===========
|
||||
|
||||
Low-level protocol-related functions.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
# pylint: disable=too-many-boolean-expressions,too-many-return-statements
|
||||
# pylint: disable=too-many-locals,too-many-statements
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
|
@ -14,7 +10,6 @@ import random
|
|||
import socket
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
from binascii import hexlify
|
||||
from struct import pack, unpack, Struct
|
||||
|
||||
|
@ -29,10 +24,18 @@ from fallback import RIPEMD160Hash
|
|||
from helper_sql import sqlExecute
|
||||
from version import softwareVersion
|
||||
|
||||
|
||||
# Service flags
#: This is a normal network node
NODE_NETWORK = 1
#: This node supports SSL/TLS in the current connection (python < 2.7.9
#: only supports an SSL client, so in that case it would only have this
#: on when the connection is a client).
NODE_SSL = 2
# (Proposal) This node may do PoW on behalf of some of its peers
# (PoW offloading/delegating), but it doesn't have to. Clients may have
# to meet additional requirements (e.g. TLS authentication)
# NODE_POW = 4
#: Node supports dandelion
NODE_DANDELION = 8
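The flags are plain bit positions in the services field of the version message. A hedged helper, not part of protocol.py, to test one of them:

import protocol

def supports_dandelion(services):
    # Hypothetical helper: True if the peer advertised the dandelion bit.
    return bool(services & protocol.NODE_DANDELION)

assert supports_dandelion(protocol.NODE_NETWORK | protocol.NODE_DANDELION)
assert not supports_dandelion(protocol.NODE_NETWORK | protocol.NODE_SSL)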
|
||||
|
||||
# Bitfield flags
|
||||
|
@ -94,7 +97,8 @@ def isBitSetWithinBitfield(fourByteString, n):
|
|||
def encodeHost(host):
|
||||
"""Encode a given host to be used in low-level socket operations"""
|
||||
if host.find('.onion') > -1:
|
||||
return '\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode(host.split(".")[0], True)
|
||||
return '\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode(
|
||||
host.split(".")[0], True)
|
||||
elif host.find(':') == -1:
|
||||
return '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + \
|
||||
socket.inet_aton(host)
|
||||
|
@ -110,8 +114,39 @@ def networkType(host):
|
|||
return 'IPv6'
|
||||
|
||||
|
||||
def network_group(host):
|
||||
"""Canonical identifier of network group
|
||||
simplified, borrowed from
|
||||
GetGroup() in src/netaddresses.cpp in bitcoin core"""
|
||||
if not isinstance(host, str):
|
||||
return None
|
||||
network_type = networkType(host)
|
||||
try:
|
||||
raw_host = encodeHost(host)
|
||||
except socket.error:
|
||||
return host
|
||||
if network_type == 'IPv4':
|
||||
decoded_host = checkIPv4Address(raw_host[12:], True)
|
||||
if decoded_host:
|
||||
# /16 subnet
|
||||
return raw_host[12:14]
|
||||
elif network_type == 'IPv6':
|
||||
decoded_host = checkIPv6Address(raw_host, True)
|
||||
if decoded_host:
|
||||
# /32 subnet
|
||||
return raw_host[0:12]
|
||||
else:
|
||||
# just host, e.g. for tor
|
||||
return host
|
||||
# global network type group for local, private, unroutable
|
||||
return network_type
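
For reference, the new helper behaves roughly like this (the expected values mirror the new src/tests/test_networkgroup.py added further down in this commit):

    network_group('1.2.3.4')                 # '\x01\x02' -- /16 group of a routable IPv4 address
    network_group('127.0.0.1')               # 'IPv4'     -- unroutable addresses collapse to the network type
    network_group('quzwelsuziwqgpt2.onion')  # the hostname itself, for tor/onion peers
    network_group(None)                      # None       -- non-string input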
|
||||
|
||||
|
||||
def checkIPAddress(host, private=False):
|
||||
"""Returns hostStandardFormat if it is a valid IP address, otherwise returns False"""
|
||||
"""
|
||||
Returns hostStandardFormat if it is a valid IP address,
|
||||
otherwise returns False
|
||||
"""
|
||||
if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
|
||||
hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:])
|
||||
return checkIPv4Address(host[12:], hostStandardFormat, private)
|
||||
|
@ -127,35 +162,46 @@ def checkIPAddress(host, private=False):
|
|||
except ValueError:
|
||||
return False
|
||||
if hostStandardFormat == "":
|
||||
# This can happen on Windows systems which are not 64-bit compatible
|
||||
# so let us drop the IPv6 address.
|
||||
# This can happen on Windows systems which are
|
||||
# not 64-bit compatible so let us drop the IPv6 address.
|
||||
return False
|
||||
return checkIPv6Address(host, hostStandardFormat, private)
|
||||
|
||||
|
||||
def checkIPv4Address(host, hostStandardFormat, private=False):
|
||||
"""Returns hostStandardFormat if it is an IPv4 address, otherwise returns False"""
|
||||
"""
|
||||
Returns hostStandardFormat if it is an IPv4 address,
|
||||
otherwise returns False
|
||||
"""
|
||||
if host[0] == '\x7F': # 127/8
|
||||
if not private:
|
||||
logger.debug('Ignoring IP address in loopback range: %s', hostStandardFormat)
|
||||
logger.debug(
|
||||
'Ignoring IP address in loopback range: %s',
|
||||
hostStandardFormat)
|
||||
return hostStandardFormat if private else False
|
||||
if host[0] == '\x0A': # 10/8
|
||||
if not private:
|
||||
logger.debug('Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
logger.debug(
|
||||
'Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
return hostStandardFormat if private else False
|
||||
if host[0:2] == '\xC0\xA8': # 192.168/16
|
||||
if not private:
|
||||
logger.debug('Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
logger.debug(
|
||||
'Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
return hostStandardFormat if private else False
|
||||
if host[0:2] >= '\xAC\x10' and host[0:2] < '\xAC\x20': # 172.16/12
|
||||
if not private:
|
||||
logger.debug('Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
logger.debug(
|
||||
'Ignoring IP address in private range: %s', hostStandardFormat)
|
||||
return hostStandardFormat if private else False
|
||||
return False if private else hostStandardFormat
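
The byte-prefix comparisons above operate on the packed inet_aton() form of the address; a quick illustration of the prefixes being matched (Python 2 semantics, where indexing a byte string yields one-character strings):

    import socket
    socket.inet_aton('127.0.0.1')[0]     # '\x7f'      -> loopback 127/8
    socket.inet_aton('10.8.0.1')[0]      # '\x0a'      -> private 10/8
    socket.inet_aton('192.168.1.2')[:2]  # '\xc0\xa8'  -> private 192.168/16
    socket.inet_aton('172.16.0.1')[:2]   # '\xac\x10'  -> private 172.16/12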
|
||||
|
||||
|
||||
def checkIPv6Address(host, hostStandardFormat, private=False):
|
||||
"""Returns hostStandardFormat if it is an IPv6 address, otherwise returns False"""
|
||||
"""
|
||||
Returns hostStandardFormat if it is an IPv6 address,
|
||||
otherwise returns False
|
||||
"""
|
||||
if host == ('\x00' * 15) + '\x01':
|
||||
if not private:
|
||||
logger.debug('Ignoring loopback address: %s', hostStandardFormat)
|
||||
|
@ -166,7 +212,8 @@ def checkIPv6Address(host, hostStandardFormat, private=False):
|
|||
return hostStandardFormat if private else False
|
||||
if (ord(host[0]) & 0xfe) == 0xfc:
|
||||
if not private:
|
||||
logger.debug('Ignoring unique local address: %s', hostStandardFormat)
|
||||
logger.debug(
|
||||
'Ignoring unique local address: %s', hostStandardFormat)
|
||||
return hostStandardFormat if private else False
|
||||
return False if private else hostStandardFormat
|
||||
|
||||
|
@ -187,28 +234,27 @@ def haveSSL(server=False):
|
|||
|
||||
def checkSocksIP(host):
|
||||
"""Predicate to check if we're using a SOCKS proxy"""
|
||||
sockshostname = BMConfigParser().safeGet(
|
||||
'bitmessagesettings', 'sockshostname')
|
||||
try:
|
||||
if state.socksIP is None or not state.socksIP:
|
||||
state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
|
||||
# uninitialised
|
||||
except NameError:
|
||||
state.socksIP = socket.gethostbyname(BMConfigParser().get("bitmessagesettings", "sockshostname"))
|
||||
# resolving failure
|
||||
except socket.gaierror:
|
||||
state.socksIP = BMConfigParser().get("bitmessagesettings", "sockshostname")
|
||||
if not state.socksIP:
|
||||
state.socksIP = socket.gethostbyname(sockshostname)
|
||||
except NameError: # uninitialised
|
||||
state.socksIP = socket.gethostbyname(sockshostname)
|
||||
except (TypeError, socket.gaierror): # None, resolving failure
|
||||
state.socksIP = sockshostname
|
||||
return state.socksIP == host
|
||||
|
||||
|
||||
def isProofOfWorkSufficient(data,
|
||||
nonceTrialsPerByte=0,
|
||||
payloadLengthExtraBytes=0,
|
||||
recvTime=0):
|
||||
def isProofOfWorkSufficient(
|
||||
data, nonceTrialsPerByte=0, payloadLengthExtraBytes=0, recvTime=0):
|
||||
"""
|
||||
Validate an object's Proof of Work using method described in:
|
||||
https://bitmessage.org/wiki/Proof_of_work
|
||||
Validate an object's Proof of Work using the method described
|
||||
`here <https://bitmessage.org/wiki/Proof_of_work>`_
|
||||
|
||||
Arguments:
|
||||
int nonceTrialsPerByte (default: from default.py)
|
||||
int payloadLengthExtraBytes (default: from default.py)
|
||||
int nonceTrialsPerByte (default: from `.defaults`)
|
||||
int payloadLengthExtraBytes (default: from `.defaults`)
|
||||
float recvTime (optional) UNIX epoch time when object was
|
||||
received from the network (default: current system time)
|
||||
Returns:
|
||||
|
@ -222,18 +268,20 @@ def isProofOfWorkSufficient(data,
|
|||
TTL = endOfLifeTime - (int(recvTime) if recvTime else int(time.time()))
|
||||
if TTL < 300:
|
||||
TTL = 300
|
||||
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
|
||||
:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
|
||||
return POW <= 2 ** 64 / (nonceTrialsPerByte *
|
||||
(len(data) + payloadLengthExtraBytes +
|
||||
((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16))))
|
||||
POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(
|
||||
data[:8] + hashlib.sha512(data[8:]).digest()
|
||||
).digest()).digest()[0:8])
|
||||
return POW <= 2 ** 64 / (
|
||||
nonceTrialsPerByte * (
|
||||
len(data) + payloadLengthExtraBytes +
|
||||
((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16))))
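
To make the reformatted target calculation concrete, here is a small stand-alone sketch; the per-byte and extra-bytes values are the usual network defaults from defaults.py (assumed here), and the object size is made up:

    nonceTrialsPerByte = 1000        # assumed network default
    payloadLengthExtraBytes = 1000   # assumed network default
    data_len = 618                   # example object length in bytes, incl. the 8-byte nonce
    TTL = 345600                     # 4 days to live

    target = 2 ** 64 // (
        nonceTrialsPerByte * (
            data_len + payloadLengthExtraBytes +
            ((TTL * (data_len + payloadLengthExtraBytes)) // (2 ** 16))))
    # The first 8 bytes of SHA512(SHA512(nonce + SHA512(rest_of_object))),
    # read as a big-endian integer, must be <= target for the PoW to pass.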
|
||||
|
||||
|
||||
# Packet creation
|
||||
|
||||
|
||||
def CreatePacket(command, payload=''):
|
||||
"""Construct and return a number of bytes from a payload"""
|
||||
"""Construct and return a packet"""
|
||||
payload_length = len(payload)
|
||||
checksum = hashlib.sha512(payload).digest()[0:4]
|
||||
|
||||
|
@ -243,8 +291,13 @@ def CreatePacket(command, payload=''):
|
|||
return bytes(b)
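
For context, the bytes assembled around this hunk follow the standard Bitmessage message header (magic, 12-byte command, payload length, truncated SHA-512 checksum); a rough equivalent, with the magic value taken from the protocol wiki rather than this diff:

    import hashlib
    from struct import pack

    def create_packet_sketch(command, payload=''):
        # 4-byte magic, NUL-padded 12-byte command, payload length,
        # first 4 bytes of SHA-512(payload), then the payload itself.
        return pack('>L12sL4s', 0xE9BEB4D9, command, len(payload),
                    hashlib.sha512(payload).digest()[:4]) + payload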
|
||||
|
||||
|
||||
def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=False, nodeid=None):
|
||||
"""Construct the payload of a version message, return the resultng bytes of running CreatePacket() on it"""
|
||||
def assembleVersionMessage(
|
||||
remoteHost, remotePort, participatingStreams, server=False, nodeid=None
|
||||
):
|
||||
"""
|
||||
Construct the payload of a version message,
|
||||
return the resulting bytes of running `CreatePacket` on it
|
||||
"""
|
||||
payload = ''
|
||||
payload += pack('>L', 3) # protocol version.
|
||||
# bitflags of the services I offer.
|
||||
|
@ -256,9 +309,10 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=
|
|||
)
|
||||
payload += pack('>q', int(time.time()))
|
||||
|
||||
payload += pack(
|
||||
'>q', 1) # boolservices of remote connection; ignored by the remote host.
|
||||
if checkSocksIP(remoteHost) and server: # prevent leaking of tor outbound IP
|
||||
# boolservices of remote connection; ignored by the remote host.
|
||||
payload += pack('>q', 1)
|
||||
if checkSocksIP(remoteHost) and server:
|
||||
# prevent leaking of tor outbound IP
|
||||
payload += encodeHost('127.0.0.1')
|
||||
payload += pack('>H', 8444)
|
||||
else:
|
||||
|
@ -277,21 +331,25 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=
|
|||
(NODE_SSL if haveSSL(server) else 0) |
|
||||
(NODE_DANDELION if state.dandelion else 0)
|
||||
)
|
||||
# = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
|
||||
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L', 2130706433)
|
||||
# = 127.0.0.1. This will be ignored by the remote host.
|
||||
# The actual remote connected IP will be used.
|
||||
payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack(
|
||||
'>L', 2130706433)
|
||||
# we have a separate extPort and incoming over clearnet
|
||||
# or outgoing through clearnet
|
||||
extport = BMConfigParser().safeGetInt('bitmessagesettings', 'extport')
|
||||
if (
|
||||
extport and ((server and not checkSocksIP(remoteHost)) or (
|
||||
BMConfigParser().get('bitmessagesettings', 'socksproxytype') ==
|
||||
'none' and not server))
|
||||
BMConfigParser().get('bitmessagesettings', 'socksproxytype')
|
||||
== 'none' and not server))
|
||||
):
|
||||
payload += pack('>H', extport)
|
||||
elif checkSocksIP(remoteHost) and server: # incoming connection over Tor
|
||||
payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
|
||||
payload += pack(
|
||||
'>H', BMConfigParser().getint('bitmessagesettings', 'onionport'))
|
||||
else: # no extport and not incoming over Tor
|
||||
payload += pack('>H', BMConfigParser().getint('bitmessagesettings', 'port'))
|
||||
payload += pack(
|
||||
'>H', BMConfigParser().getint('bitmessagesettings', 'port'))
|
||||
|
||||
if nodeid is not None:
|
||||
payload += nodeid[0:8]
|
||||
|
@ -315,7 +373,10 @@ def assembleVersionMessage(remoteHost, remotePort, participatingStreams, server=
|
|||
|
||||
|
||||
def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''):
|
||||
"""Construct the payload of an error message, return the resultng bytes of running CreatePacket() on it"""
|
||||
"""
|
||||
Construct the payload of an error message,
|
||||
return the resulting bytes of running `CreatePacket` on it
|
||||
"""
|
||||
payload = encodeVarint(fatal)
|
||||
payload += encodeVarint(banTime)
|
||||
payload += encodeVarint(len(inventoryVector))
|
||||
|
@ -452,7 +513,7 @@ def decryptAndCheckPubkeyPayload(data, address):
|
|||
except Exception:
|
||||
logger.critical(
|
||||
'Pubkey decryption was UNsuccessful because of'
|
||||
' an unhandled exception! This is definitely a bug! \n%s',
|
||||
traceback.format_exc()
|
||||
' an unhandled exception! This is definitely a bug!',
|
||||
exc_info=True
|
||||
)
|
||||
return 'failed'
|
||||
|
|
|
@ -1,10 +1,13 @@
|
|||
"""
|
||||
src/pyelliptic/__init__.py
|
||||
=====================================
|
||||
Copyright (C) 2010
|
||||
Author: Yann GUIBET
|
||||
Contact: <yannguibet@gmail.com>
|
||||
|
||||
Python OpenSSL wrapper.
|
||||
For modern cryptography with ECC, AES, HMAC, Blowfish, ...
|
||||
|
||||
This is an abandoned package maintained inside of the PyBitmessage.
|
||||
"""
|
||||
# Copyright (C) 2010
|
||||
# Author: Yann GUIBET
|
||||
# Contact: <yannguibet@gmail.com>
|
||||
|
||||
from .openssl import OpenSSL
|
||||
from .ecc import ECC
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
src/pyelliptic/hash.py
|
||||
=====================
|
||||
Wrappers for hash functions from OpenSSL.
|
||||
"""
|
||||
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
|
||||
# See LICENSE for details.
|
||||
|
|
|
@ -1,26 +1,19 @@
|
|||
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
|
||||
# See LICENSE for details.
|
||||
#
|
||||
# Software slightly changed by Jonathan Warren <bitmessage at-symbol jonwarren.org>
|
||||
# pylint: disable=protected-access, import-error
|
||||
"""
|
||||
src/pyelliptic/openssl.py
|
||||
=================================
|
||||
This module loads openssl libs with ctypes and encapsulates
|
||||
needed openssl functionality in class _OpenSSL.
|
||||
"""
|
||||
# pylint: disable=import-error
|
||||
|
||||
import sys
|
||||
import ctypes
|
||||
from kivy.utils import platform
|
||||
|
||||
OpenSSL = None
|
||||
|
||||
# !/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
src/pyelliptic/openssl.py
|
||||
=====================
|
||||
"""
|
||||
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
|
||||
# See LICENSE for details.
|
||||
#
|
||||
# Software slightly changed by Jonathan Warren <bitmessage at-symbol jonwarren.org>
|
||||
# pylint: disable=protected-access
|
||||
|
||||
|
||||
class CipherName:
|
||||
"""Class returns cipher name, pointer and blocksize"""
|
||||
|
|
|
@ -1,23 +1,19 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding:utf-8 -*-
|
||||
# pylint: disable=too-many-locals,too-many-arguments,too-many-function-args
|
||||
"""
|
||||
= usage =
|
||||
Usage
|
||||
-----
|
||||
|
||||
== python ==
|
||||
>>> import qtidenticon
|
||||
>>> qtidenticon.render_identicon(code, size)
|
||||
|
||||
Return a PIL Image class instance containing the generated identicon image.
|
||||
```size``` specifies `patch size`. Generated image size is 3 * ```size```.
|
||||
``size`` specifies `patch size`. Generated image size is 3 * ``size``.
|
||||
"""
|
||||
|
||||
from PyQt4 import QtGui
|
||||
from PyQt4.QtCore import QSize, QPointF, Qt
|
||||
from PyQt4.QtGui import QPixmap, QPainter, QPolygonF
|
||||
|
||||
__all__ = ['render_identicon', 'IdenticonRendererBase']
|
||||
|
||||
|
||||
class IdenticonRendererBase(object):
|
||||
"""Encapsulate methods around rendering identicons"""
|
||||
|
@ -26,7 +22,7 @@ class IdenticonRendererBase(object):
|
|||
|
||||
def __init__(self, code):
|
||||
"""
|
||||
@param code code for icon
|
||||
:param code: code for icon
|
||||
"""
|
||||
if not isinstance(code, int):
|
||||
code = int(code)
|
||||
|
@ -36,8 +32,8 @@ class IdenticonRendererBase(object):
|
|||
"""
|
||||
render identicon to QPicture
|
||||
|
||||
@param size identicon patchsize. (image size is 3 * [size])
|
||||
@return QPicture
|
||||
:param size: identicon patchsize. (image size is 3 * [size])
|
||||
:returns: :class:`QPicture`
|
||||
"""
|
||||
|
||||
# decode the code
|
||||
|
@ -79,7 +75,7 @@ class IdenticonRendererBase(object):
|
|||
def drawPatchQt(self, pos, turn, invert, patch_type, image, size, foreColor,
|
||||
backColor, penwidth): # pylint: disable=unused-argument
|
||||
"""
|
||||
@param size patch size
|
||||
:param size: patch size
|
||||
"""
|
||||
path = self.PATH_SET[patch_type]
|
||||
if not path:
|
||||
|
@ -134,7 +130,7 @@ class IdenticonRendererBase(object):
|
|||
class DonRenderer(IdenticonRendererBase):
|
||||
"""
|
||||
Don Park's implementation of identicon
|
||||
see : http://www.docuverse.com/blog/donpark/2007/01/19/identicon-updated-and-source-released
|
||||
see: http://www.docuverse.com/blog/donpark/2007/01/19/identicon-updated-and-source-released
|
||||
"""
|
||||
|
||||
PATH_SET = [
|
||||
|
|
|
@ -1,20 +1,51 @@
|
|||
import Queue
|
||||
"""Most of the queues used by bitmessage threads are defined here."""
|
||||
|
||||
import Queue
|
||||
import threading
|
||||
import time
|
||||
|
||||
from class_objectProcessorQueue import ObjectProcessorQueue
|
||||
from multiqueue import MultiQueue
|
||||
|
||||
|
||||
class ObjectProcessorQueue(Queue.Queue):
|
||||
"""Special queue class using lock for `.threads.objectProcessor`"""
|
||||
|
||||
maxSize = 32000000
|
||||
|
||||
def __init__(self):
|
||||
Queue.Queue.__init__(self)
|
||||
self.sizeLock = threading.Lock()
|
||||
#: in Bytes. We maintain this to prevent nodes from flooding us
|
||||
#: with objects which take up too much memory. If this gets
|
||||
#: too big we'll sleep before asking for further objects.
|
||||
self.curSize = 0
|
||||
|
||||
def put(self, item, block=True, timeout=None):
|
||||
while self.curSize >= self.maxSize:
|
||||
time.sleep(1)
|
||||
with self.sizeLock:
|
||||
self.curSize += len(item[1])
|
||||
Queue.Queue.put(self, item, block, timeout)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
item = Queue.Queue.get(self, block, timeout)
|
||||
with self.sizeLock:
|
||||
self.curSize -= len(item[1])
|
||||
return item
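
In other words the queue throttles producers by total payload size rather than item count; a small usage sketch (the item shown is hypothetical, but follows the (objectType, data) shape used by the callers):

    q = ObjectProcessorQueue()
    q.put((0, 'x' * 100))        # curSize grows by len(data) == 100
    obj_type, data = q.get()     # curSize shrinks again once consumed
    # put() sleeps in 1-second steps while curSize >= maxSize (32 MB),
    # so a flood of large objects back-pressures the receiving threads.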
|
||||
|
||||
|
||||
workerQueue = Queue.Queue()
|
||||
UISignalQueue = Queue.Queue()
|
||||
addressGeneratorQueue = Queue.Queue()
|
||||
# receiveDataThreads dump objects they hear on the network into this
|
||||
# queue to be processed.
|
||||
#: `.network.ReceiveQueueThread` instances dump objects they hear
|
||||
#: on the network into this queue to be processed.
|
||||
objectProcessorQueue = ObjectProcessorQueue()
|
||||
invQueue = MultiQueue()
|
||||
addrQueue = MultiQueue()
|
||||
portCheckerQueue = Queue.Queue()
|
||||
receiveDataQueue = Queue.Queue()
|
||||
# The address generator thread uses this queue to get information back
|
||||
# to the API thread.
|
||||
#: The address generator thread uses this queue to get information back
|
||||
#: to the API thread.
|
||||
apiAddressGeneratorReturnQueue = Queue.Queue()
|
||||
# Exceptions
|
||||
#: for exceptions
|
||||
excQueue = Queue.Queue()
|
||||
|
|
116
src/shared.py
116
src/shared.py
|
@ -1,21 +1,28 @@
|
|||
from __future__ import division
|
||||
"""
|
||||
Some shared functions
|
||||
|
||||
.. deprecated:: 0.6.3
|
||||
Should be moved to different places and this file removed,
|
||||
but it needs refactoring.
|
||||
"""
|
||||
from __future__ import division
|
||||
|
||||
# Libraries.
|
||||
import hashlib
|
||||
import os
|
||||
import sys
|
||||
import stat
|
||||
import threading
|
||||
import hashlib
|
||||
import subprocess
|
||||
from binascii import hexlify
|
||||
from pyelliptic import arithmetic
|
||||
from kivy.utils import platform
|
||||
# Project imports.
|
||||
import state
|
||||
import highlevelcrypto
|
||||
import state
|
||||
from addresses import decodeAddress, encodeVarint
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from addresses import decodeAddress, encodeVarint
|
||||
from helper_sql import sqlQuery
|
||||
|
||||
|
||||
|
@ -56,6 +63,7 @@ maximumLengthOfTimeToBotherResendingMessages = 0
|
|||
|
||||
|
||||
def isAddressInMyAddressBook(address):
|
||||
"""Is address in my addressbook?"""
|
||||
queryreturn = sqlQuery(
|
||||
'''select address from addressbook where address=?''',
|
||||
address)
|
||||
|
@ -64,6 +72,7 @@ def isAddressInMyAddressBook(address):
|
|||
|
||||
# At this point we should really just have a isAddressInMy(book, address)...
|
||||
def isAddressInMySubscriptionsList(address):
|
||||
"""Am I subscribed to this address?"""
|
||||
queryreturn = sqlQuery(
|
||||
'''select * from subscriptions where address=?''',
|
||||
str(address))
|
||||
|
@ -71,6 +80,9 @@ def isAddressInMySubscriptionsList(address):
|
|||
|
||||
|
||||
def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
|
||||
"""
|
||||
Am I subscribed to this address, is it in my addressbook or whitelist?
|
||||
"""
|
||||
if isAddressInMyAddressBook(address):
|
||||
return True
|
||||
|
||||
|
@ -91,6 +103,11 @@ def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
|
|||
|
||||
|
||||
def decodeWalletImportFormat(WIFstring):
|
||||
# pylint: disable=inconsistent-return-statements
|
||||
"""
|
||||
Convert private key from base58 that's used in the config file to
|
||||
8-bit binary string
|
||||
"""
|
||||
fullString = arithmetic.changebase(WIFstring, 58, 256)
|
||||
privkey = fullString[:-4]
|
||||
if fullString[-4:] != \
|
||||
|
@ -101,7 +118,7 @@ def decodeWalletImportFormat(WIFstring):
|
|||
' 6 characters of the PRIVATE key: %s',
|
||||
str(WIFstring)[:6]
|
||||
)
|
||||
os._exit(0)
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
# return ""
|
||||
elif privkey[0] == '\x80': # checksum passed
|
||||
return privkey[1:]
|
||||
|
@ -111,10 +128,11 @@ def decodeWalletImportFormat(WIFstring):
|
|||
' the checksum passed but the key doesn\'t begin with hex 80.'
|
||||
' Here is the PRIVATE key: %s', WIFstring
|
||||
)
|
||||
os._exit(0)
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
|
||||
|
||||
def reloadMyAddressHashes():
|
||||
"""Reload keys for user's addresses from the config file"""
|
||||
logger.debug('reloading keys from keys.dat file')
|
||||
|
||||
myECCryptorObjects.clear()
|
||||
|
@ -122,33 +140,29 @@ def reloadMyAddressHashes():
|
|||
myAddressesByTag.clear()
|
||||
# myPrivateKeys.clear()
|
||||
|
||||
keyfileSecure = checkSensitiveFilePermissions(state.appdata + 'keys.dat')
|
||||
keyfileSecure = checkSensitiveFilePermissions(os.path.join(
|
||||
state.appdata, 'keys.dat'))
|
||||
hasEnabledKeys = False
|
||||
for addressInKeysFile in BMConfigParser().addresses():
|
||||
isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
|
||||
if isEnabled:
|
||||
hasEnabledKeys = True
|
||||
# status
|
||||
_, addressVersionNumber, streamNumber, hash = \
|
||||
decodeAddress(addressInKeysFile)
|
||||
addressVersionNumber, streamNumber, hashobj = decodeAddress(addressInKeysFile)[1:]
|
||||
if addressVersionNumber in (2, 3, 4):
|
||||
# Returns a simple 32 bytes of information encoded
|
||||
# in 64 Hex characters, or null if there was an error.
|
||||
privEncryptionKey = hexlify(decodeWalletImportFormat(
|
||||
BMConfigParser().get(addressInKeysFile, 'privencryptionkey'))
|
||||
)
|
||||
|
||||
BMConfigParser().get(addressInKeysFile, 'privencryptionkey')))
|
||||
# It is 32 bytes encoded as 64 hex characters
|
||||
if len(privEncryptionKey) == 64:
|
||||
myECCryptorObjects[hash] = \
|
||||
myECCryptorObjects[hashobj] = \
|
||||
highlevelcrypto.makeCryptor(privEncryptionKey)
|
||||
myAddressesByHash[hash] = addressInKeysFile
|
||||
myAddressesByHash[hashobj] = addressInKeysFile
|
||||
tag = hashlib.sha512(hashlib.sha512(
|
||||
encodeVarint(addressVersionNumber) +
|
||||
encodeVarint(streamNumber) + hash).digest()
|
||||
).digest()[32:]
|
||||
encodeVarint(streamNumber) + hashobj).digest()).digest()[32:]
|
||||
myAddressesByTag[tag] = addressInKeysFile
|
||||
|
||||
else:
|
||||
logger.error(
|
||||
'Error in reloadMyAddressHashes: Can\'t handle'
|
||||
|
@ -161,6 +175,10 @@ def reloadMyAddressHashes():
|
|||
|
||||
|
||||
def reloadBroadcastSendersForWhichImWatching():
|
||||
"""
|
||||
Reinitialize runtime data for the broadcasts I'm subscribed to
|
||||
from the config file
|
||||
"""
|
||||
broadcastSendersForWhichImWatching.clear()
|
||||
MyECSubscriptionCryptorObjects.clear()
|
||||
queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1')
|
||||
|
@ -168,9 +186,9 @@ def reloadBroadcastSendersForWhichImWatching():
|
|||
for row in queryreturn:
|
||||
address, = row
|
||||
# status
|
||||
_, addressVersionNumber, streamNumber, hash = decodeAddress(address)
|
||||
addressVersionNumber, streamNumber, hashobj = decodeAddress(address)[1:]
|
||||
if addressVersionNumber == 2:
|
||||
broadcastSendersForWhichImWatching[hash] = 0
|
||||
broadcastSendersForWhichImWatching[hashobj] = 0
|
||||
# Now, for all addresses, even version 2 addresses,
|
||||
# we should create Cryptor objects in a dictionary which we will
|
||||
# use to attempt to decrypt encrypted broadcast messages.
|
||||
|
@ -178,14 +196,14 @@ def reloadBroadcastSendersForWhichImWatching():
|
|||
if addressVersionNumber <= 3:
|
||||
privEncryptionKey = hashlib.sha512(
|
||||
encodeVarint(addressVersionNumber) +
|
||||
encodeVarint(streamNumber) + hash
|
||||
encodeVarint(streamNumber) + hashobj
|
||||
).digest()[:32]
|
||||
MyECSubscriptionCryptorObjects[hash] = \
|
||||
MyECSubscriptionCryptorObjects[hashobj] = \
|
||||
highlevelcrypto.makeCryptor(hexlify(privEncryptionKey))
|
||||
else:
|
||||
doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(
|
||||
encodeVarint(addressVersionNumber) +
|
||||
encodeVarint(streamNumber) + hash
|
||||
encodeVarint(streamNumber) + hashobj
|
||||
).digest()).digest()
|
||||
tag = doubleHashOfAddressData[32:]
|
||||
privEncryptionKey = doubleHashOfAddressData[:32]
|
||||
|
@ -194,21 +212,22 @@ def reloadBroadcastSendersForWhichImWatching():
|
|||
|
||||
|
||||
def fixPotentiallyInvalidUTF8Data(text):
|
||||
"""Sanitise invalid UTF-8 strings"""
|
||||
try:
|
||||
unicode(text, 'utf-8')
|
||||
return text
|
||||
except:
|
||||
return 'Part of the message is corrupt. The message cannot be' \
|
||||
' displayed the normal way.\n\n' + repr(text)
|
||||
' displayed the normal way.\n\n' + repr(text)
|
||||
|
||||
|
||||
# Checks sensitive file permissions for inappropriate umask
|
||||
# during keys.dat creation. (Or unwise subsequent chmod.)
|
||||
#
|
||||
# Returns true iff file appears to have appropriate permissions.
|
||||
def checkSensitiveFilePermissions(filename):
|
||||
"""
|
||||
:param str filename: path to the file
|
||||
:return: True if file appears to have appropriate permissions.
|
||||
"""
|
||||
if sys.platform == 'win32':
|
||||
# TODO: This might deserve extra checks by someone familiar with
|
||||
# .. todo:: This might deserve extra checks by someone familiar with
|
||||
# Windows systems.
|
||||
return True
|
||||
elif sys.platform[:7] == 'freebsd':
|
||||
|
@ -216,30 +235,30 @@ def checkSensitiveFilePermissions(filename):
|
|||
present_permissions = os.stat(filename)[0]
|
||||
disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
|
||||
return present_permissions & disallowed_permissions == 0
|
||||
else:
|
||||
try:
|
||||
# Skip known problems for non-Win32 filesystems
|
||||
# without POSIX permissions.
|
||||
fstype = subprocess.check_output(
|
||||
'stat -f -c "%%T" %s' % (filename),
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT
|
||||
)
|
||||
if 'fuseblk' in fstype:
|
||||
logger.info(
|
||||
'Skipping file permissions check for %s.'
|
||||
' Filesystem fuseblk detected.', filename)
|
||||
return True
|
||||
except:
|
||||
# Swallow exception here, but we might run into trouble later!
|
||||
logger.error('Could not determine filesystem type. %s', filename)
|
||||
present_permissions = os.stat(filename)[0]
|
||||
disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
|
||||
return present_permissions & disallowed_permissions == 0
|
||||
try:
|
||||
# Skip known problems for non-Win32 filesystems
|
||||
# without POSIX permissions.
|
||||
fstype = subprocess.check_output(
|
||||
'stat -f -c "%%T" %s' % (filename),
|
||||
shell=True,
|
||||
stderr=subprocess.STDOUT
|
||||
)
|
||||
if 'fuseblk' in fstype:
|
||||
logger.info(
|
||||
'Skipping file permissions check for %s.'
|
||||
' Filesystem fuseblk detected.', filename)
|
||||
return True
|
||||
except:
|
||||
# Swallow exception here, but we might run into trouble later!
|
||||
logger.error('Could not determine filesystem type. %s', filename)
|
||||
present_permissions = os.stat(filename)[0]
|
||||
disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
|
||||
return present_permissions & disallowed_permissions == 0
|
||||
|
||||
|
||||
# Fixes permissions on a sensitive file.
|
||||
def fixSensitiveFilePermissions(filename, hasEnabledKeys):
|
||||
"""Try to change file permissions to be more restrictive"""
|
||||
if hasEnabledKeys:
|
||||
logger.warning(
|
||||
'Keyfile had insecure permissions, and there were enabled'
|
||||
|
@ -264,6 +283,7 @@ def fixSensitiveFilePermissions(filename, hasEnabledKeys):
|
|||
|
||||
|
||||
def openKeysFile():
|
||||
"""Open keys file with an external editor"""
|
||||
if 'linux' in sys.platform:
|
||||
subprocess.call(["xdg-open", state.appdata + 'keys.dat'])
|
||||
else:
|
||||
|
|
|
@ -1,22 +1,24 @@
|
|||
"""shutdown function"""
|
||||
import os
|
||||
import Queue
|
||||
import threading
|
||||
import time
|
||||
|
||||
from debug import logger
|
||||
from helper_sql import sqlQuery, sqlStoredProcedure
|
||||
from helper_threading import StoppableThread
|
||||
from knownnodes import saveKnownNodes
|
||||
from inventory import Inventory
|
||||
from queues import (
|
||||
addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue)
|
||||
import shared
|
||||
import state
|
||||
from debug import logger
|
||||
from helper_sql import sqlQuery, sqlStoredProcedure
|
||||
from inventory import Inventory
|
||||
from knownnodes import saveKnownNodes
|
||||
from network import StoppableThread
|
||||
from queues import (
|
||||
addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue)
|
||||
|
||||
|
||||
def doCleanShutdown():
|
||||
# Used to tell proof of work worker threads
|
||||
# and the objectProcessorThread to exit.
|
||||
"""
|
||||
Used to tell all the threads to finish work and exit.
|
||||
"""
|
||||
state.shutdown = 1
|
||||
|
||||
objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
|
||||
|
@ -52,9 +54,11 @@ def doCleanShutdown():
|
|||
time.sleep(.25)
|
||||
|
||||
for thread in threading.enumerate():
|
||||
if (thread is not threading.currentThread() and
|
||||
isinstance(thread, StoppableThread) and
|
||||
thread.name != 'SQL'):
|
||||
if (
|
||||
thread is not threading.currentThread()
|
||||
and isinstance(thread, StoppableThread)
|
||||
and thread.name != 'SQL'
|
||||
):
|
||||
logger.debug("Waiting for thread %s", thread.name)
|
||||
thread.join()
|
||||
|
||||
|
@ -76,10 +80,10 @@ def doCleanShutdown():
|
|||
except Queue.Empty:
|
||||
break
|
||||
|
||||
if shared.thisapp.daemon or not state.enableGUI: # FIXME redundant?
|
||||
if shared.thisapp.daemon or not state.enableGUI: # ..fixme:: redundant?
|
||||
logger.info('Clean shutdown complete.')
|
||||
shared.thisapp.cleanup()
|
||||
os._exit(0)
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
else:
|
||||
logger.info('Core shutdown complete.')
|
||||
for thread in threading.enumerate():
|
||||
|
|
|
@ -1,8 +1,13 @@
|
|||
#! /usr/bin/env python
|
||||
"""
|
||||
This is based upon the singleton class from
|
||||
`tendo <https://github.com/pycontribs/tendo>`_
|
||||
which is under the Python Software Foundation License version 2
|
||||
"""
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import sys
|
||||
|
||||
import state
|
||||
|
||||
try:
|
||||
|
@ -11,15 +16,10 @@ except ImportError:
|
|||
pass
|
||||
|
||||
|
||||
|
||||
class singleinstance:
|
||||
class singleinstance(object):
|
||||
"""
|
||||
Implements a single instance application by creating a lock file
|
||||
at appdata.
|
||||
|
||||
This is based upon the singleton class from tendo
|
||||
https://github.com/pycontribs/tendo
|
||||
which is under the Python Software Foundation License version 2
|
||||
"""
|
||||
def __init__(self, flavor_id="", daemon=False):
|
||||
self.initialized = False
|
||||
|
@ -40,6 +40,7 @@ class singleinstance:
|
|||
atexit.register(self.cleanup)
|
||||
|
||||
def lock(self):
|
||||
"""Obtain single instance lock"""
|
||||
if self.lockPid is None:
|
||||
self.lockPid = os.getpid()
|
||||
if sys.platform == 'win32':
|
||||
|
@ -52,8 +53,7 @@ class singleinstance:
|
|||
self.lockfile,
|
||||
os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC
|
||||
)
|
||||
except OSError:
|
||||
type, e, tb = sys.exc_info()
|
||||
except OSError as e:
|
||||
if e.errno == 13:
|
||||
print(
|
||||
'Another instance of this application'
|
||||
|
@ -84,6 +84,7 @@ class singleinstance:
|
|||
self.fp.flush()
|
||||
|
||||
def cleanup(self):
|
||||
"""Release single instance lock"""
|
||||
if not self.initialized:
|
||||
return
|
||||
if self.daemon and self.lockPid == os.getpid():
|
||||
|
@ -94,7 +95,7 @@ class singleinstance:
|
|||
os.close(self.fd)
|
||||
else:
|
||||
fcntl.lockf(self.fp, fcntl.LOCK_UN)
|
||||
except Exception, e:
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return
|
||||
|
|
|
@ -1,6 +1,21 @@
|
|||
"""
|
||||
Singleton decorator definition
|
||||
"""
|
||||
|
||||
from functools import wraps
|
||||
|
||||
|
||||
def Singleton(cls):
|
||||
"""
|
||||
Decorator implementing the singleton pattern:
|
||||
it restricts the instantiation of a class to one "single" instance.
|
||||
"""
|
||||
instances = {}
|
||||
|
||||
# https://github.com/sphinx-doc/sphinx/issues/3783
|
||||
@wraps(cls)
|
||||
def getinstance():
|
||||
"""Find an instance or save newly created one"""
|
||||
if cls not in instances:
|
||||
instances[cls] = cls()
|
||||
return instances[cls]
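
A usage sketch of the decorator (the decorated class name here is made up):

    @Singleton
    class ConnectionRegistry(object):
        pass

    # Both calls go through getinstance(), which caches the first
    # construction, so the same object is returned every time.
    assert ConnectionRegistry() is ConnectionRegistry()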
|
||||
|
|
45
src/state.py
45
src/state.py
|
@ -1,30 +1,43 @@
|
|||
"""
|
||||
src/state.py
|
||||
=================================
|
||||
Global runtime variables.
|
||||
"""
|
||||
import collections
|
||||
|
||||
neededPubkeys = {}
|
||||
streamsInWhichIAmParticipating = []
|
||||
# For UPnP
|
||||
|
||||
extPort = None
|
||||
# for Tor hidden service
|
||||
"""For UPnP"""
|
||||
|
||||
socksIP = None
|
||||
# Network protocols availability, initialised below
|
||||
networkProtocolAvailability = None
|
||||
appdata = '' # holds the location of the application data storage directory
|
||||
# Set to 1 by the doCleanShutdown function.
|
||||
# Used to tell the proof of work worker threads to exit.
|
||||
"""for Tor hidden service"""
|
||||
|
||||
appdata = ''
|
||||
"""holds the location of the application data storage directory"""
|
||||
|
||||
shutdown = 0
|
||||
"""
|
||||
Set to 1 by the `.shutdown.doCleanShutdown` function.
|
||||
Used to tell the threads to exit.
|
||||
"""
|
||||
|
||||
# Component control flags - set on startup, do not change during runtime
|
||||
# The defaults are for standalone GUI (default operating mode)
|
||||
enableNetwork = True # enable network threads
|
||||
enableObjProc = True # enable object processing threads
|
||||
enableAPI = True # enable API (if configured)
|
||||
enableGUI = True # enable GUI (QT or ncurses)
|
||||
enableSTDIO = False # enable STDIO threads
|
||||
enableNetwork = True
|
||||
"""enable network threads"""
|
||||
enableObjProc = True
|
||||
"""enable object processing thread"""
|
||||
enableAPI = True
|
||||
"""enable API (if configured)"""
|
||||
enableGUI = True
|
||||
"""enable GUI (QT or ncurses)"""
|
||||
enableSTDIO = False
|
||||
"""enable STDIO threads"""
|
||||
curses = False
|
||||
sqlReady = False # set to true by sqlTread when ready for processing
|
||||
|
||||
sqlReady = False
|
||||
"""set to true by `.threads.sqlThread` when ready for processing"""
|
||||
|
||||
maximumNumberOfHalfOpenConnections = 0
|
||||
invThread = None
|
||||
addrThread = None
|
||||
|
@ -55,6 +68,8 @@ def resetNetworkProtocolAvailability():
|
|||
|
||||
resetNetworkProtocolAvailability()
|
||||
|
||||
discoveredPeers = {}
|
||||
|
||||
dandelion = 0
|
||||
|
||||
testmode = False
|
||||
|
|
|
@ -17,6 +17,7 @@ from bmconfigparser import BMConfigParser
|
|||
from helper_msgcoding import MsgEncode, MsgDecode
|
||||
from network import asyncore_pollchoose as asyncore
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network.node import Peer
|
||||
from network.tcp import Socks4aBMConnection, Socks5BMConnection, TCPConnection
|
||||
from queues import excQueue
|
||||
|
||||
|
@ -30,7 +31,7 @@ def pickle_knownnodes():
|
|||
with open(knownnodes_file, 'wb') as dst:
|
||||
pickle.dump({
|
||||
stream: {
|
||||
state.Peer(
|
||||
Peer(
|
||||
'%i.%i.%i.%i' % tuple([
|
||||
random.randint(1, 255) for i in range(4)]),
|
||||
8444): {'lastseen': now, 'rating': 0.1}
|
||||
|
@ -90,7 +91,7 @@ class TestCore(unittest.TestCase):
|
|||
"""initial fill script from network.tcp"""
|
||||
BMConfigParser().set('bitmessagesettings', 'dontconnect', 'true')
|
||||
try:
|
||||
for peer in (state.Peer("127.0.0.1", 8448),):
|
||||
for peer in (Peer("127.0.0.1", 8448),):
|
||||
direct = TCPConnection(peer)
|
||||
while asyncore.socket_map:
|
||||
print("loop, state = %s" % direct.state)
|
||||
|
@ -147,7 +148,7 @@ class TestCore(unittest.TestCase):
|
|||
def _initiate_bootstrap(self):
|
||||
BMConfigParser().set('bitmessagesettings', 'dontconnect', 'true')
|
||||
self._outdate_knownnodes()
|
||||
knownnodes.addKnownNode(1, state.Peer('127.0.0.1', 8444), is_self=True)
|
||||
knownnodes.addKnownNode(1, Peer('127.0.0.1', 8444), is_self=True)
|
||||
knownnodes.cleanupKnownNodes()
|
||||
time.sleep(2)
|
||||
|
||||
|
@ -173,6 +174,22 @@ class TestCore(unittest.TestCase):
|
|||
self.fail(
|
||||
'Failed to connect during %s sec' % (time.time() - _started))
|
||||
|
||||
def test_onionservicesonly(self):
|
||||
"""test onionservicesonly networking mode"""
|
||||
BMConfigParser().set('bitmessagesettings', 'onionservicesonly', 'true')
|
||||
self._initiate_bootstrap()
|
||||
BMConfigParser().remove_option('bitmessagesettings', 'dontconnect')
|
||||
for _ in range(360):
|
||||
time.sleep(1)
|
||||
for n, peer in enumerate(BMConnectionPool().outboundConnections):
|
||||
if n > 2:
|
||||
return
|
||||
if not peer.host.endswith('.onion'):
|
||||
self.fail(
|
||||
'Found non onion hostname %s in outbound connections!'
|
||||
% peer.host)
|
||||
self.fail('Failed to connect to at least 3 nodes within 360 sec')
|
||||
|
||||
def test_bootstrap(self):
|
||||
"""test bootstrapping"""
|
||||
self._initiate_bootstrap()
|
||||
|
|
69
src/tests/test_logger.py
Normal file
69
src/tests/test_logger.py
Normal file
|
@ -0,0 +1,69 @@
|
|||
"""
|
||||
Testing the logger configuration
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
|
||||
class TestLogger(unittest.TestCase):
|
||||
"""A test case for bmconfigparser"""
|
||||
|
||||
conf_template = '''
|
||||
[loggers]
|
||||
keys=root
|
||||
|
||||
[handlers]
|
||||
keys=default
|
||||
|
||||
[formatters]
|
||||
keys=default
|
||||
|
||||
[formatter_default]
|
||||
format=%(asctime)s {1} %(message)s
|
||||
|
||||
[handler_default]
|
||||
class=FileHandler
|
||||
level=NOTSET
|
||||
formatter=default
|
||||
args=('{0}', 'w')
|
||||
|
||||
[logger_root]
|
||||
level=DEBUG
|
||||
handlers=default
|
||||
'''
|
||||
|
||||
def test_fileConfig(self):
|
||||
"""Put logging.dat with special pattern and check it was used"""
|
||||
tmp = os.environ['BITMESSAGE_HOME'] = tempfile.gettempdir()
|
||||
log_config = os.path.join(tmp, 'logging.dat')
|
||||
log_file = os.path.join(tmp, 'debug.log')
|
||||
|
||||
def gen_log_config(pattern):
|
||||
"""A small closure to generate logging.dat with custom pattern"""
|
||||
with open(log_config, 'wb') as dst:
|
||||
dst.write(self.conf_template.format(log_file, pattern))
|
||||
|
||||
pattern = r' o_0 '
|
||||
gen_log_config(pattern)
|
||||
|
||||
try:
|
||||
from pybitmessage.debug import logger, resetLogging
|
||||
if not os.path.isfile(log_file): # second pass
|
||||
pattern = r' <===> '
|
||||
gen_log_config(pattern)
|
||||
resetLogging()
|
||||
except ImportError:
|
||||
self.fail('There is no package pybitmessage. Things gone wrong.')
|
||||
finally:
|
||||
os.remove(log_config)
|
||||
|
||||
logger_ = logging.getLogger('default')
|
||||
|
||||
self.assertEqual(logger, logger_)
|
||||
|
||||
logger_.info('Testing the logger...')
|
||||
|
||||
self.assertRegexpMatches(open(log_file).read(), pattern)
|
39
src/tests/test_networkgroup.py
Normal file
39
src/tests/test_networkgroup.py
Normal file
|
@ -0,0 +1,39 @@
|
|||
"""
|
||||
Test for network group
|
||||
"""
|
||||
import unittest
|
||||
|
||||
|
||||
class TestNetworkGroup(unittest.TestCase):
|
||||
"""
|
||||
Test case for network group
|
||||
"""
|
||||
def test_network_group(self):
|
||||
"""Test various types of network groups"""
|
||||
from pybitmessage.protocol import network_group
|
||||
|
||||
test_ip = '1.2.3.4'
|
||||
self.assertEqual('\x01\x02', network_group(test_ip))
|
||||
|
||||
test_ip = '127.0.0.1'
|
||||
self.assertEqual('IPv4', network_group(test_ip))
|
||||
|
||||
test_ip = '0102:0304:0506:0708:090A:0B0C:0D0E:0F10'
|
||||
self.assertEqual(
|
||||
'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C',
|
||||
network_group(test_ip))
|
||||
|
||||
test_ip = 'bootstrap8444.bitmessage.org'
|
||||
self.assertEqual(
|
||||
'bootstrap8444.bitmessage.org',
|
||||
network_group(test_ip))
|
||||
|
||||
test_ip = 'quzwelsuziwqgpt2.onion'
|
||||
self.assertEqual(
|
||||
test_ip,
|
||||
network_group(test_ip))
|
||||
|
||||
test_ip = None
|
||||
self.assertEqual(
|
||||
None,
|
||||
network_group(test_ip))
|
46
src/threads.py
Normal file
46
src/threads.py
Normal file
|
@ -0,0 +1,46 @@
|
|||
"""
|
||||
PyBitmessage does various tasks in separate threads. Most of them inherit
|
||||
from `.network.StoppableThread`. There are `addressGenerator` for
|
||||
address generation, `objectProcessor` for processing the network objects
|
||||
that have passed minimal validation, `singleCleaner` to periodically clean various
|
||||
internal storages (like inventory and knownnodes) and do forced garbage
|
||||
collection, `singleWorker` for doing PoW, `sqlThread` for querying sqlite
|
||||
database.
|
||||
|
||||
There are also other threads in the `.network` package.
|
||||
|
||||
:func:`set_thread_name` is defined here for the threads that don't inherit from
|
||||
:class:`.network.StoppableThread`
|
||||
"""
|
||||
|
||||
import threading
|
||||
|
||||
try:
|
||||
import prctl
|
||||
except ImportError:
|
||||
def set_thread_name(name):
|
||||
"""Set a name for the thread for python internal use."""
|
||||
threading.current_thread().name = name
|
||||
else:
|
||||
def set_thread_name(name):
|
||||
"""Set the thread name for external use (visible from the OS)."""
|
||||
prctl.set_name(name)
|
||||
|
||||
def _thread_name_hack(self):
|
||||
set_thread_name(self.name)
|
||||
threading.Thread.__bootstrap_original__(self)
|
||||
# pylint: disable=protected-access
|
||||
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
|
||||
threading.Thread._Thread__bootstrap = _thread_name_hack
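
The monkey-patch above wraps Thread's private __bootstrap (Python 2 only) so that every thread names itself as it starts; the observable effect, sketched:

    import threading

    t = threading.Thread(target=lambda: None, name='bm-example')
    t.start()   # _thread_name_hack runs first and calls set_thread_name('bm-example'),
    t.join()    # so with python-prctl installed the name is visible to the OS (e.g. in ps -L).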
|
||||
|
||||
from class_addressGenerator import addressGenerator
|
||||
from class_objectProcessor import objectProcessor
|
||||
from class_singleCleaner import singleCleaner
|
||||
from class_singleWorker import singleWorker
|
||||
from class_sqlThread import sqlThread
|
||||
|
||||
|
||||
__all__ = [
|
||||
"addressGenerator", "objectProcessor", "singleCleaner", "singleWorker",
|
||||
"sqlThread"
|
||||
]
|
30
src/tr.py
30
src/tr.py
|
@ -1,21 +1,17 @@
|
|||
"""
|
||||
src/tr.py
|
||||
=================================
|
||||
Translating text
|
||||
"""
|
||||
# pylint: disable=relative-import
|
||||
import os
|
||||
|
||||
import state
|
||||
|
||||
"""This is used so that the translateText function can be used """
|
||||
"""when we are in daemon mode and not using any QT functions."""
|
||||
|
||||
|
||||
class translateClass: # pylint: disable=old-style-class, too-few-public-methods
|
||||
class translateClass:
|
||||
"""
|
||||
This is used so that the translateText function can be used when we are
|
||||
in daemon mode and not using any QT functions.
|
||||
This is used so that the translateText function can be used
|
||||
when we are in daemon mode and not using any QT functions.
|
||||
"""
|
||||
# pylint: disable=old-style-class,too-few-public-methods
|
||||
def __init__(self, context, text):
|
||||
self.context = context
|
||||
self.text = text
|
||||
|
@ -29,12 +25,9 @@ class translateClass: # pylint: disable=old-style-class, too-few-public-m
|
|||
return self.text
|
||||
|
||||
|
||||
def _translate(context, text, disambiguation=None, encoding=None, n=None): # pylint: disable=unused-argument
|
||||
def _translate(context, text, disambiguation=None, encoding=None, n=None): # pylint: disable=unused-argument
|
||||
return translateText(context, text, n)
|
||||
|
||||
# def _translate(context, text, disambiguation = None, encoding = None, n = None):
|
||||
# return translateClass(context, text.replace('%','',1))
|
||||
|
||||
|
||||
def translateText(context, text, n=None):
|
||||
"""Translate text in context"""
|
||||
|
@ -46,12 +39,13 @@ def translateText(context, text, n=None):
|
|||
try:
|
||||
from PyQt4 import QtCore, QtGui
|
||||
except Exception as err:
|
||||
print 'PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API\
|
||||
.You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download\
|
||||
or by searching Google for \'PyQt Download\'.\
|
||||
If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon'
|
||||
print 'PyBitmessage requires PyQt unless you want to run it as a daemon'\
|
||||
' and interact with it using the API.'\
|
||||
' You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download'\
|
||||
' or by searching Google for \'PyQt Download\'.'\
|
||||
' If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon'
|
||||
print 'Error message:', err
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
os._exit(0) # pylint: disable=protected-access
|
||||
if n is None:
|
||||
return QtGui.QApplication.translate(context, text)
|
||||
return QtGui.QApplication.translate(context, text, None, QtCore.QCoreApplication.CodecForTr, n)
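
A usage sketch for the helper; the context and source strings are made-up examples, only the call shape matters:

    from tr import _translate

    _translate("MainWindow", "Settings")                    # plain translation
    _translate("MainWindow", "%n hour(s)", None, None, 5)   # plural handling via the n argument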
|
||||
|
|
11
src/upnp.py
11
src/upnp.py
|
@ -1,9 +1,6 @@
|
|||
# pylint: disable=too-many-statements,too-many-branches,protected-access,no-self-use
|
||||
"""
|
||||
src/upnp.py
|
||||
===========
|
||||
|
||||
A simple upnp module to forward port for BitMessage
|
||||
Complete UPnP port forwarding implementation in separate thread.
|
||||
Reference: http://mattscodecave.com/posts/using-python-and-upnp-to-forward-a-port
|
||||
"""
|
||||
|
||||
|
@ -21,8 +18,8 @@ import state
|
|||
import tr
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_threading import StoppableThread
|
||||
from network.connectionpool import BMConnectionPool
|
||||
from network import BMConnectionPool, StoppableThread
|
||||
from network.node import Peer
|
||||
|
||||
|
||||
def createRequestXML(service, action, arguments=None):
|
||||
|
@ -263,7 +260,7 @@ class uPnPThread(StoppableThread):
|
|||
self.routers.append(newRouter)
|
||||
self.createPortMapping(newRouter)
|
||||
try:
|
||||
self_peer = state.Peer(
|
||||
self_peer = Peer(
|
||||
newRouter.GetExternalIPAddress(),
|
||||
self.extPort
|
||||
)
|
||||
|
|