From ed1c8ca1007974b8e8ce1861ad624c9eb03a2751 Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Mon, 15 Nov 2021 21:50:35 +0530 Subject: [PATCH 01/10] Added mock code for class_objectProcessor, class_singleWorker, inventory, connectionpool & stats --- src/mock/class_objectProcessor.py | 50 ++++++++++++++++++++++++++++++ src/mock/class_singleWorker.py | 45 +++++++++++++++++++++++++++ src/mock/inventory.py | 14 +++++++++ src/mock/network/connectionpool.py | 25 +++++++++++++++ src/mock/network/stats.py | 13 ++++++++ 5 files changed, 147 insertions(+) create mode 100644 src/mock/class_objectProcessor.py create mode 100644 src/mock/class_singleWorker.py create mode 100644 src/mock/inventory.py create mode 100644 src/mock/network/connectionpool.py create mode 100644 src/mock/network/stats.py diff --git a/src/mock/class_objectProcessor.py b/src/mock/class_objectProcessor.py new file mode 100644 index 00000000..84958bb3 --- /dev/null +++ b/src/mock/class_objectProcessor.py @@ -0,0 +1,50 @@ +""" +The objectProcessor thread, of which there is only one, +processes the network objects +""" +import logging +import random +import threading + +import queues +import state + +from helper_sql import sql_ready, sqlExecute, sqlQuery +from network import bmproto + +logger = logging.getLogger('default') + + +class objectProcessor(threading.Thread): + """ + The objectProcessor thread, of which there is only one, receives network + objects (msg, broadcast, pubkey, getpubkey) from the receiveDataThreads. + """ + def __init__(self): + threading.Thread.__init__(self, name="objectProcessor") + random.seed() + # It may be the case that the last time Bitmessage was running, + # the user closed it before it finished processing everything in the + # objectProcessorQueue. Assuming that Bitmessage wasn't closed + # forcefully, it should have saved the data in the queue into the + # objectprocessorqueue table. Let's pull it out. 
+ sql_ready.wait() + queryreturn = sqlQuery( + 'SELECT objecttype, data FROM objectprocessorqueue') + for objectType, data in queryreturn: + queues.objectProcessorQueue.put((objectType, data)) + sqlExecute('DELETE FROM objectprocessorqueue') + logger.debug( + 'Loaded %s objects from disk into the objectProcessorQueue.', + len(queryreturn)) + self._ack_obj = bmproto.BMStringParser() + self.successfullyDecryptMessageTimings = [] + + def run(self): + """Process the objects from `.queues.objectProcessorQueue`""" + while True: + objectType, data = queues.objectProcessorQueue.get() + + if state.shutdown: + state.shutdown = 2 + break diff --git a/src/mock/class_singleWorker.py b/src/mock/class_singleWorker.py new file mode 100644 index 00000000..af9b8d83 --- /dev/null +++ b/src/mock/class_singleWorker.py @@ -0,0 +1,45 @@ +""" +Thread for performing PoW +""" + +from __future__ import division + +import proofofwork +import queues +import state + +from network import StoppableThread +from six.moves import queue + + +class MockSingleWorker(StoppableThread): + """Thread for performing PoW""" + + def __init__(self): + super(MockSingleWorker, self).__init__(name="singleWorker") + proofofwork.init() + + def stopThread(self): + """Signal through the queue that the thread should be stopped""" + + try: + queues.workerQueue.put(("stopThread", "data")) + except queue.Full: + self.logger.error('workerQueue is Full') + super(MockSingleWorker, self).stopThread() + + def run(self): + + if state.shutdown > 0: + return + + while state.shutdown == 0: + self.busy = 0 + command, data = queues.workerQueue.get() + self.busy = 1 + if command == 'stopThread': + self.busy = 0 + return + + queues.workerQueue.task_done() + self.logger.info("Quitting...") diff --git a/src/mock/inventory.py b/src/mock/inventory.py new file mode 100644 index 00000000..04bceaf6 --- /dev/null +++ b/src/mock/inventory.py @@ -0,0 +1,14 @@ +"""The Inventory singleton""" + +# TODO make this dynamic, and watch out for frozen, 
like with messagetypes +from singleton import Singleton + + +@Singleton +class MockInventory(): + """ + Inventory singleton class which uses storage backends + to manage the inventory. + """ + def __init__(self): + self.numberOfInventoryLookupsPerformed = 0 diff --git a/src/mock/network/connectionpool.py b/src/mock/network/connectionpool.py new file mode 100644 index 00000000..1981f631 --- /dev/null +++ b/src/mock/network/connectionpool.py @@ -0,0 +1,25 @@ +""" +`BMConnectionPool` class definition +""" +import logging + +import asyncore_pollchoose as asyncore +from bmconfigparser import BMConfigParser +from singleton import Singleton + +logger = logging.getLogger('default') + + +@Singleton +class MockBMConnectionPool(object): + """Pool of all existing connections""" + + def __init__(self): + asyncore.set_rates( + BMConfigParser().safeGetInt( + "bitmessagesettings", "maxdownloadrate"), + BMConfigParser().safeGetInt( + "bitmessagesettings", "maxuploadrate") + ) + self.outboundConnections = {} + self.inboundConnections = {} diff --git a/src/mock/network/stats.py b/src/mock/network/stats.py new file mode 100644 index 00000000..a5fe9072 --- /dev/null +++ b/src/mock/network/stats.py @@ -0,0 +1,13 @@ +""" +Network statistics +""" + + +def MockUploadSpeed(): + """Getting upload speed""" + return 0 + + +def MockDownloadSpeed(): + """Getting download speed""" + return 0 -- 2.47.2 From 3cc49da6f13a6ae9f2168139e60d11a70da65608 Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Thu, 2 Dec 2021 21:45:37 +0530 Subject: [PATCH 02/10] Fixed pylint, updated connectionpool & stats changes, added bitmessagemock file & moved mock folder to tests --- src/{ => tests}/mock/__init__.py | 0 src/tests/mock/bitmessagemock.py | 2 ++ src/{ => tests}/mock/class_addressGenerator.py | 0 src/{ => tests}/mock/class_objectProcessor.py | 1 + src/{ => tests}/mock/class_singleWorker.py | 3 ++- src/{ => tests}/mock/inventory.py | 2 +- src/{ => tests}/mock/kivy_main.py | 0 src/{ => 
tests}/mock/network/connectionpool.py | 8 +------- src/{ => tests}/mock/network/stats.py | 4 ++-- 9 files changed, 9 insertions(+), 11 deletions(-) rename src/{ => tests}/mock/__init__.py (100%) create mode 100644 src/tests/mock/bitmessagemock.py rename src/{ => tests}/mock/class_addressGenerator.py (100%) rename src/{ => tests}/mock/class_objectProcessor.py (97%) rename src/{ => tests}/mock/class_singleWorker.py (93%) rename src/{ => tests}/mock/inventory.py (86%) rename src/{ => tests}/mock/kivy_main.py (100%) rename src/{ => tests}/mock/network/connectionpool.py (63%) rename src/{ => tests}/mock/network/stats.py (71%) diff --git a/src/mock/__init__.py b/src/tests/mock/__init__.py similarity index 100% rename from src/mock/__init__.py rename to src/tests/mock/__init__.py diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py new file mode 100644 index 00000000..4f023ea0 --- /dev/null +++ b/src/tests/mock/bitmessagemock.py @@ -0,0 +1,2 @@ +def main(): + pass \ No newline at end of file diff --git a/src/mock/class_addressGenerator.py b/src/tests/mock/class_addressGenerator.py similarity index 100% rename from src/mock/class_addressGenerator.py rename to src/tests/mock/class_addressGenerator.py diff --git a/src/mock/class_objectProcessor.py b/src/tests/mock/class_objectProcessor.py similarity index 97% rename from src/mock/class_objectProcessor.py rename to src/tests/mock/class_objectProcessor.py index 84958bb3..09120fc6 100644 --- a/src/mock/class_objectProcessor.py +++ b/src/tests/mock/class_objectProcessor.py @@ -43,6 +43,7 @@ class objectProcessor(threading.Thread): def run(self): """Process the objects from `.queues.objectProcessorQueue`""" while True: + # pylint: disable=unused-variable objectType, data = queues.objectProcessorQueue.get() if state.shutdown: diff --git a/src/mock/class_singleWorker.py b/src/tests/mock/class_singleWorker.py similarity index 93% rename from src/mock/class_singleWorker.py rename to 
src/tests/mock/class_singleWorker.py index af9b8d83..92ffffbd 100644 --- a/src/mock/class_singleWorker.py +++ b/src/tests/mock/class_singleWorker.py @@ -18,6 +18,7 @@ class MockSingleWorker(StoppableThread): def __init__(self): super(MockSingleWorker, self).__init__(name="singleWorker") proofofwork.init() + self.busy = None def stopThread(self): """Signal through the queue that the thread should be stopped""" @@ -35,7 +36,7 @@ class MockSingleWorker(StoppableThread): while state.shutdown == 0: self.busy = 0 - command, data = queues.workerQueue.get() + command, _ = queues.workerQueue.get() self.busy = 1 if command == 'stopThread': self.busy = 0 diff --git a/src/mock/inventory.py b/src/tests/mock/inventory.py similarity index 86% rename from src/mock/inventory.py rename to src/tests/mock/inventory.py index 04bceaf6..6468865d 100644 --- a/src/mock/inventory.py +++ b/src/tests/mock/inventory.py @@ -3,7 +3,7 @@ # TODO make this dynamic, and watch out for frozen, like with messagetypes from singleton import Singleton - +# pylint: disable=old-style-class,too-few-public-methods @Singleton class MockInventory(): """ diff --git a/src/mock/kivy_main.py b/src/tests/mock/kivy_main.py similarity index 100% rename from src/mock/kivy_main.py rename to src/tests/mock/kivy_main.py diff --git a/src/mock/network/connectionpool.py b/src/tests/mock/network/connectionpool.py similarity index 63% rename from src/mock/network/connectionpool.py rename to src/tests/mock/network/connectionpool.py index 1981f631..9aa16363 100644 --- a/src/mock/network/connectionpool.py +++ b/src/tests/mock/network/connectionpool.py @@ -9,17 +9,11 @@ from singleton import Singleton logger = logging.getLogger('default') - +# pylint: disable=too-few-public-methods @Singleton class MockBMConnectionPool(object): """Pool of all existing connections""" def __init__(self): - asyncore.set_rates( - BMConfigParser().safeGetInt( - "bitmessagesettings", "maxdownloadrate"), - BMConfigParser().safeGetInt( - 
"bitmessagesettings", "maxuploadrate") - ) self.outboundConnections = {} self.inboundConnections = {} diff --git a/src/mock/network/stats.py b/src/tests/mock/network/stats.py similarity index 71% rename from src/mock/network/stats.py rename to src/tests/mock/network/stats.py index a5fe9072..33dc4445 100644 --- a/src/mock/network/stats.py +++ b/src/tests/mock/network/stats.py @@ -3,11 +3,11 @@ Network statistics """ -def MockUploadSpeed(): +def uploadSpeed(): """Getting upload speed""" return 0 -def MockDownloadSpeed(): +def downloadSpeed(): """Getting download speed""" return 0 -- 2.47.2 From c5dee5f22e0f6fea3456653eb2ea24ed2e66eea9 Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Fri, 3 Dec 2021 12:31:28 +0530 Subject: [PATCH 03/10] Fixed linter --- src/tests/mock/bitmessagemock.py | 3 ++- src/tests/mock/inventory.py | 1 + src/tests/mock/network/connectionpool.py | 3 +-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 4f023ea0..71055c62 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -1,2 +1,3 @@ def main(): - pass \ No newline at end of file + """Mock main function""" + pass diff --git a/src/tests/mock/inventory.py b/src/tests/mock/inventory.py index 6468865d..4148156f 100644 --- a/src/tests/mock/inventory.py +++ b/src/tests/mock/inventory.py @@ -3,6 +3,7 @@ # TODO make this dynamic, and watch out for frozen, like with messagetypes from singleton import Singleton + # pylint: disable=old-style-class,too-few-public-methods @Singleton class MockInventory(): diff --git a/src/tests/mock/network/connectionpool.py b/src/tests/mock/network/connectionpool.py index 9aa16363..9347103d 100644 --- a/src/tests/mock/network/connectionpool.py +++ b/src/tests/mock/network/connectionpool.py @@ -3,12 +3,11 @@ """ import logging -import asyncore_pollchoose as asyncore -from bmconfigparser import BMConfigParser from singleton import Singleton 
logger = logging.getLogger('default') + # pylint: disable=too-few-public-methods @Singleton class MockBMConnectionPool(object): -- 2.47.2 From d00f5ee3a8c29451d47bf802a4305b50f9956e8e Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Mon, 13 Dec 2021 22:18:50 +0530 Subject: [PATCH 04/10] updated changes on bitmessagemock --- src/tests/mock/bitmessagemock.py | 35 ++++++++++++++++++++++++- src/tests/mock/class_objectProcessor.py | 2 +- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 71055c62..24491a5a 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -1,3 +1,36 @@ +from class_addressGenerator import FakeAddressGenerator +from class_singleWorker import MockSingleWorker +from class_objectProcessor import MockObjectProcessor +from inventory import MockInventory + + def main(): """Mock main function""" - pass + def start(self): + """Start main application""" + # pylint: disable=too-many-statements,too-many-branches,too-many-locals + + config = BMConfigParser() + daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') + + # Start the address generation thread + addressGeneratorThread = addressGenerator() + # close the main program even if there are threads left + addressGeneratorThread.daemon = True + addressGeneratorThread.start() + + # Start the thread that calculates POWs + singleWorkerThread = MockSingleWorker() + # close the main program even if there are threads left + singleWorkerThread.daemon = True + singleWorkerThread.start() + + # Start the thread that calculates POWs + objectProcessorThread = MockObjectProcessor() + # DON'T close the main program even the thread remains. + # This thread checks the shutdown variable after processing + # each object. 
+ objectProcessorThread.daemon = False + objectProcessorThread.start() + + MockInventory() # init diff --git a/src/tests/mock/class_objectProcessor.py b/src/tests/mock/class_objectProcessor.py index 09120fc6..c418abc6 100644 --- a/src/tests/mock/class_objectProcessor.py +++ b/src/tests/mock/class_objectProcessor.py @@ -15,7 +15,7 @@ from network import bmproto logger = logging.getLogger('default') -class objectProcessor(threading.Thread): +class MockObjectProcessor(threading.Thread): """ The objectProcessor thread, of which there is only one, receives network objects (msg, broadcast, pubkey, getpubkey) from the receiveDataThreads. -- 2.47.2 From 3aa6473b7df02bd98efc30d4b1df02e57ee9c8ae Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Thu, 16 Dec 2021 12:09:45 +0530 Subject: [PATCH 05/10] Updated --- src/tests/mock/bitmessagemock.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 24491a5a..47069d0e 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -4,8 +4,9 @@ from class_objectProcessor import MockObjectProcessor from inventory import MockInventory -def main(): +class MockMain(): """Mock main function""" + def start(self): """Start main application""" # pylint: disable=too-many-statements,too-many-branches,too-many-locals @@ -14,7 +15,7 @@ def main(): daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') # Start the address generation thread - addressGeneratorThread = addressGenerator() + addressGeneratorThread = FakeAddressGenerator() # close the main program even if there are threads left addressGeneratorThread.daemon = True addressGeneratorThread.start() @@ -34,3 +35,13 @@ def main(): objectProcessorThread.start() MockInventory() # init + + +def main(): + """Triggers main module""" + mainprogram = MockMain() + mainprogram.start() + + +if __name__ == "__main__": + main() -- 2.47.2 From 
249781582b120f8aca20764b00a4d4b69031129c Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Thu, 16 Dec 2021 20:27:53 +0530 Subject: [PATCH 06/10] created dummy pybitmessage folder in mock & fixing import for it --- src/tests/mock/bitmessagemock.py | 16 +- src/tests/mock/network/connectionpool.py | 18 - src/tests/mock/network/stats.py | 13 - src/tests/mock/pybitmessage/__init__.py | 0 src/tests/mock/pybitmessage/addresses.py | 283 +++ src/tests/mock/pybitmessage/api.py | 1537 ++++++++++++++ src/tests/mock/pybitmessage/bitmessagecli.py | 1887 +++++++++++++++++ src/tests/mock/pybitmessage/bitmessagemain.py | 431 ++++ src/tests/mock/pybitmessage/bmconfigparser.py | 271 +++ src/tests/mock/pybitmessage/build_osx.py | 38 + .../class_addressGenerator.py | 2 +- .../class_objectProcessor.py | 2 +- .../mock/pybitmessage/class_singleCleaner.py | 187 ++ .../{ => pybitmessage}/class_singleWorker.py | 2 +- .../mock/pybitmessage/class_smtpDeliver.py | 117 + .../mock/pybitmessage/class_smtpServer.py | 217 ++ .../mock/pybitmessage/class_sqlThread.py | 639 ++++++ src/tests/mock/pybitmessage/debug.py | 157 ++ src/tests/mock/pybitmessage/defaults.py | 24 + src/tests/mock/pybitmessage/depends.py | 450 ++++ .../mock/pybitmessage/fallback/__init__.py | 32 + .../fallback/umsgpack/__init__.py | 0 .../fallback/umsgpack/umsgpack.py | 1067 ++++++++++ .../mock/pybitmessage/helper_ackPayload.py | 51 + .../mock/pybitmessage/helper_addressbook.py | 14 + src/tests/mock/pybitmessage/helper_bitcoin.py | 56 + src/tests/mock/pybitmessage/helper_inbox.py | 30 + .../mock/pybitmessage/helper_msgcoding.py | 159 ++ src/tests/mock/pybitmessage/helper_random.py | 74 + src/tests/mock/pybitmessage/helper_search.py | 113 + src/tests/mock/pybitmessage/helper_sent.py | 48 + src/tests/mock/pybitmessage/helper_sql.py | 151 ++ src/tests/mock/pybitmessage/helper_startup.py | 392 ++++ .../mock/pybitmessage/highlevelcrypto.py | 146 ++ .../mock/{ => pybitmessage}/inventory.py | 2 +- 
src/tests/mock/pybitmessage/l10n.py | 152 ++ src/tests/mock/pybitmessage/main.py | 13 + src/tests/mock/pybitmessage/multiqueue.py | 54 + src/tests/mock/pybitmessage/namecoin.py | 374 ++++ .../mock/pybitmessage/network/__init__.py | 20 + .../mock/pybitmessage/network/addrthread.py | 49 + .../network/advanceddispatcher.py | 173 ++ .../pybitmessage/network/announcethread.py | 43 + .../mock/pybitmessage/network/assemble.py | 31 + .../network/asyncore_pollchoose.py | 1012 +++++++++ .../mock/pybitmessage/network/bmobject.py | 164 ++ .../mock/pybitmessage/network/bmproto.py | 709 +++++++ .../pybitmessage/network/connectionchooser.py | 77 + .../pybitmessage/network/connectionpool.py | 405 ++++ .../mock/pybitmessage/network/constants.py | 17 + .../mock/pybitmessage/network/dandelion.py | 196 ++ .../pybitmessage/network/downloadthread.py | 84 + src/tests/mock/pybitmessage/network/http.py | 89 + src/tests/mock/pybitmessage/network/httpd.py | 161 ++ src/tests/mock/pybitmessage/network/https.py | 71 + .../mock/pybitmessage/network/invthread.py | 111 + .../mock/pybitmessage/network/knownnodes.py | 269 +++ .../pybitmessage/network/networkthread.py | 42 + src/tests/mock/pybitmessage/network/node.py | 7 + .../mock/pybitmessage/network/objectracker.py | 136 ++ src/tests/mock/pybitmessage/network/proxy.py | 148 ++ .../network/receivequeuethread.py | 56 + .../mock/pybitmessage/network/socks4a.py | 147 ++ src/tests/mock/pybitmessage/network/socks5.py | 224 ++ src/tests/mock/pybitmessage/network/stats.py | 78 + src/tests/mock/pybitmessage/network/tcp.py | 448 ++++ .../mock/pybitmessage/network/threads.py | 49 + src/tests/mock/pybitmessage/network/tls.py | 220 ++ src/tests/mock/pybitmessage/network/udp.py | 147 ++ .../mock/pybitmessage/network/uploadthread.py | 69 + src/tests/mock/pybitmessage/openclpow.py | 111 + src/tests/mock/pybitmessage/openssl.py | 803 +++++++ src/tests/mock/pybitmessage/pathmagic.py | 10 + src/tests/mock/pybitmessage/paths.py | 131 ++ 
src/tests/mock/pybitmessage/proofofwork.py | 394 ++++ src/tests/mock/pybitmessage/protocol.py | 524 +++++ src/tests/mock/pybitmessage/pybitmessage | 11 + .../mock/pybitmessage/pyelliptic/__init__.py | 30 + .../pybitmessage/pyelliptic/arithmetic.py | 166 ++ .../mock/pybitmessage/pyelliptic/cipher.py | 90 + src/tests/mock/pybitmessage/pyelliptic/ecc.py | 501 +++++ .../mock/pybitmessage/pyelliptic/eccblind.py | 373 ++++ .../pybitmessage/pyelliptic/eccblindchain.py | 52 + .../mock/pybitmessage/pyelliptic/hash.py | 70 + .../mock/pybitmessage/pyelliptic/openssl.py | 803 +++++++ .../pybitmessage/pyelliptic/tests/__init__.py | 0 .../pyelliptic/tests/test_arithmetic.py | 84 + .../pyelliptic/tests/test_blindsig.py | 277 +++ .../pyelliptic/tests/test_openssl.py | 57 + src/tests/mock/pybitmessage/qidenticon.py | 276 +++ src/tests/mock/pybitmessage/queues.py | 55 + .../mock/pybitmessage/randomtrackingdict.py | 132 ++ src/tests/mock/pybitmessage/shared.py | 255 +++ src/tests/mock/pybitmessage/shutdown.py | 91 + src/tests/mock/pybitmessage/singleinstance.py | 111 + src/tests/mock/pybitmessage/singleton.py | 22 + src/tests/mock/pybitmessage/state.py | 72 + src/tests/mock/pybitmessage/testmode_init.py | 40 + src/tests/mock/pybitmessage/threads.py | 48 + src/tests/mock/pybitmessage/tr.py | 59 + src/tests/mock/pybitmessage/upnp.py | 348 +++ src/tests/mock/pybitmessage/version.py | 2 + 102 files changed, 20626 insertions(+), 43 deletions(-) delete mode 100644 src/tests/mock/network/connectionpool.py delete mode 100644 src/tests/mock/network/stats.py create mode 100644 src/tests/mock/pybitmessage/__init__.py create mode 100644 src/tests/mock/pybitmessage/addresses.py create mode 100644 src/tests/mock/pybitmessage/api.py create mode 100644 src/tests/mock/pybitmessage/bitmessagecli.py create mode 100755 src/tests/mock/pybitmessage/bitmessagemain.py create mode 100644 src/tests/mock/pybitmessage/bmconfigparser.py create mode 100644 src/tests/mock/pybitmessage/build_osx.py rename 
src/tests/mock/{ => pybitmessage}/class_addressGenerator.py (98%) rename src/tests/mock/{ => pybitmessage}/class_objectProcessor.py (97%) create mode 100644 src/tests/mock/pybitmessage/class_singleCleaner.py rename src/tests/mock/{ => pybitmessage}/class_singleWorker.py (96%) create mode 100644 src/tests/mock/pybitmessage/class_smtpDeliver.py create mode 100644 src/tests/mock/pybitmessage/class_smtpServer.py create mode 100644 src/tests/mock/pybitmessage/class_sqlThread.py create mode 100644 src/tests/mock/pybitmessage/debug.py create mode 100644 src/tests/mock/pybitmessage/defaults.py create mode 100755 src/tests/mock/pybitmessage/depends.py create mode 100644 src/tests/mock/pybitmessage/fallback/__init__.py create mode 100644 src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py create mode 100644 src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py create mode 100644 src/tests/mock/pybitmessage/helper_ackPayload.py create mode 100644 src/tests/mock/pybitmessage/helper_addressbook.py create mode 100644 src/tests/mock/pybitmessage/helper_bitcoin.py create mode 100644 src/tests/mock/pybitmessage/helper_inbox.py create mode 100644 src/tests/mock/pybitmessage/helper_msgcoding.py create mode 100644 src/tests/mock/pybitmessage/helper_random.py create mode 100644 src/tests/mock/pybitmessage/helper_search.py create mode 100644 src/tests/mock/pybitmessage/helper_sent.py create mode 100644 src/tests/mock/pybitmessage/helper_sql.py create mode 100644 src/tests/mock/pybitmessage/helper_startup.py create mode 100644 src/tests/mock/pybitmessage/highlevelcrypto.py rename src/tests/mock/{ => pybitmessage}/inventory.py (94%) create mode 100644 src/tests/mock/pybitmessage/l10n.py create mode 100644 src/tests/mock/pybitmessage/main.py create mode 100644 src/tests/mock/pybitmessage/multiqueue.py create mode 100644 src/tests/mock/pybitmessage/namecoin.py create mode 100644 src/tests/mock/pybitmessage/network/__init__.py create mode 100644 
src/tests/mock/pybitmessage/network/addrthread.py create mode 100644 src/tests/mock/pybitmessage/network/advanceddispatcher.py create mode 100644 src/tests/mock/pybitmessage/network/announcethread.py create mode 100644 src/tests/mock/pybitmessage/network/assemble.py create mode 100644 src/tests/mock/pybitmessage/network/asyncore_pollchoose.py create mode 100644 src/tests/mock/pybitmessage/network/bmobject.py create mode 100644 src/tests/mock/pybitmessage/network/bmproto.py create mode 100644 src/tests/mock/pybitmessage/network/connectionchooser.py create mode 100644 src/tests/mock/pybitmessage/network/connectionpool.py create mode 100644 src/tests/mock/pybitmessage/network/constants.py create mode 100644 src/tests/mock/pybitmessage/network/dandelion.py create mode 100644 src/tests/mock/pybitmessage/network/downloadthread.py create mode 100644 src/tests/mock/pybitmessage/network/http.py create mode 100644 src/tests/mock/pybitmessage/network/httpd.py create mode 100644 src/tests/mock/pybitmessage/network/https.py create mode 100644 src/tests/mock/pybitmessage/network/invthread.py create mode 100644 src/tests/mock/pybitmessage/network/knownnodes.py create mode 100644 src/tests/mock/pybitmessage/network/networkthread.py create mode 100644 src/tests/mock/pybitmessage/network/node.py create mode 100644 src/tests/mock/pybitmessage/network/objectracker.py create mode 100644 src/tests/mock/pybitmessage/network/proxy.py create mode 100644 src/tests/mock/pybitmessage/network/receivequeuethread.py create mode 100644 src/tests/mock/pybitmessage/network/socks4a.py create mode 100644 src/tests/mock/pybitmessage/network/socks5.py create mode 100644 src/tests/mock/pybitmessage/network/stats.py create mode 100644 src/tests/mock/pybitmessage/network/tcp.py create mode 100644 src/tests/mock/pybitmessage/network/threads.py create mode 100644 src/tests/mock/pybitmessage/network/tls.py create mode 100644 src/tests/mock/pybitmessage/network/udp.py create mode 100644 
src/tests/mock/pybitmessage/network/uploadthread.py create mode 100644 src/tests/mock/pybitmessage/openclpow.py create mode 100644 src/tests/mock/pybitmessage/openssl.py create mode 100644 src/tests/mock/pybitmessage/pathmagic.py create mode 100644 src/tests/mock/pybitmessage/paths.py create mode 100644 src/tests/mock/pybitmessage/proofofwork.py create mode 100644 src/tests/mock/pybitmessage/protocol.py create mode 100644 src/tests/mock/pybitmessage/pybitmessage create mode 100644 src/tests/mock/pybitmessage/pyelliptic/__init__.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/arithmetic.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/cipher.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/ecc.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/eccblind.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/hash.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/openssl.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py create mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py create mode 100644 src/tests/mock/pybitmessage/qidenticon.py create mode 100644 src/tests/mock/pybitmessage/queues.py create mode 100644 src/tests/mock/pybitmessage/randomtrackingdict.py create mode 100644 src/tests/mock/pybitmessage/shared.py create mode 100644 src/tests/mock/pybitmessage/shutdown.py create mode 100644 src/tests/mock/pybitmessage/singleinstance.py create mode 100644 src/tests/mock/pybitmessage/singleton.py create mode 100644 src/tests/mock/pybitmessage/state.py create mode 100644 src/tests/mock/pybitmessage/testmode_init.py create mode 100644 src/tests/mock/pybitmessage/threads.py create mode 100644 
src/tests/mock/pybitmessage/tr.py create mode 100644 src/tests/mock/pybitmessage/upnp.py create mode 100644 src/tests/mock/pybitmessage/version.py diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 47069d0e..e309dc17 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -1,7 +1,7 @@ -from class_addressGenerator import FakeAddressGenerator -from class_singleWorker import MockSingleWorker -from class_objectProcessor import MockObjectProcessor -from inventory import MockInventory +from pybitmessage.class_addressGenerator import addressGenerator +from pybitmessage.class_singleWorker import singleWorker +from pybitmessage.class_objectProcessor import objectProcessor +from pybitmessage.inventory import Inventory class MockMain(): @@ -15,26 +15,26 @@ class MockMain(): daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') # Start the address generation thread - addressGeneratorThread = FakeAddressGenerator() + addressGeneratorThread = addressGenerator() # close the main program even if there are threads left addressGeneratorThread.daemon = True addressGeneratorThread.start() # Start the thread that calculates POWs - singleWorkerThread = MockSingleWorker() + singleWorkerThread = singleWorker() # close the main program even if there are threads left singleWorkerThread.daemon = True singleWorkerThread.start() # Start the thread that calculates POWs - objectProcessorThread = MockObjectProcessor() + objectProcessorThread = objectProcessor() # DON'T close the main program even the thread remains. # This thread checks the shutdown variable after processing # each object. 
objectProcessorThread.daemon = False objectProcessorThread.start() - MockInventory() # init + Inventory() # init def main(): diff --git a/src/tests/mock/network/connectionpool.py b/src/tests/mock/network/connectionpool.py deleted file mode 100644 index 9347103d..00000000 --- a/src/tests/mock/network/connectionpool.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -`BMConnectionPool` class definition -""" -import logging - -from singleton import Singleton - -logger = logging.getLogger('default') - - -# pylint: disable=too-few-public-methods -@Singleton -class MockBMConnectionPool(object): - """Pool of all existing connections""" - - def __init__(self): - self.outboundConnections = {} - self.inboundConnections = {} diff --git a/src/tests/mock/network/stats.py b/src/tests/mock/network/stats.py deleted file mode 100644 index 33dc4445..00000000 --- a/src/tests/mock/network/stats.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Network statistics -""" - - -def uploadSpeed(): - """Getting upload speed""" - return 0 - - -def downloadSpeed(): - """Getting download speed""" - return 0 diff --git a/src/tests/mock/pybitmessage/__init__.py b/src/tests/mock/pybitmessage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/mock/pybitmessage/addresses.py b/src/tests/mock/pybitmessage/addresses.py new file mode 100644 index 00000000..e48873a1 --- /dev/null +++ b/src/tests/mock/pybitmessage/addresses.py @@ -0,0 +1,283 @@ +""" +Operations with addresses +""" +# pylint: disable=inconsistent-return-statements +import hashlib +import logging +from binascii import hexlify, unhexlify +from struct import pack, unpack + + +logger = logging.getLogger('default') + +ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" + + +def encodeBase58(num): + """Encode a number in Base X + + Args: + num: The number to encode + alphabet: The alphabet to use for encoding + """ + if num < 0: + return None + if num == 0: + return ALPHABET[0] + arr = [] + base = len(ALPHABET) + while 
num: + num, rem = divmod(num, base) + arr.append(ALPHABET[rem]) + arr.reverse() + return ''.join(arr) + + +def decodeBase58(string): + """Decode a Base X encoded string into the number + + Args: + string: The encoded string + alphabet: The alphabet to use for encoding + """ + base = len(ALPHABET) + num = 0 + + try: + for char in string: + num *= base + num += ALPHABET.index(char) + except ValueError: + # character not found (like a space character or a 0) + return 0 + return num + + +class varintEncodeError(Exception): + """Exception class for encoding varint""" + pass + + +class varintDecodeError(Exception): + """Exception class for decoding varint data""" + pass + + +def encodeVarint(integer): + """Convert integer into varint bytes""" + if integer < 0: + raise varintEncodeError('varint cannot be < 0') + if integer < 253: + return pack('>B', integer) + if integer >= 253 and integer < 65536: + return pack('>B', 253) + pack('>H', integer) + if integer >= 65536 and integer < 4294967296: + return pack('>B', 254) + pack('>I', integer) + if integer >= 4294967296 and integer < 18446744073709551616: + return pack('>B', 255) + pack('>Q', integer) + if integer >= 18446744073709551616: + raise varintEncodeError('varint cannot be >= 18446744073709551616') + + +def decodeVarint(data): + """ + Decodes an encoded varint to an integer and returns it. + Per protocol v3, the encoded value must be encoded with + the minimum amount of data possible or else it is malformed. + Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes) + """ + + if not data: + return (0, 0) + firstByte, = unpack('>B', data[0:1]) + if firstByte < 253: + # encodes 0 to 252 + return (firstByte, 1) # the 1 is the length of the varint + if firstByte == 253: + # encodes 253 to 65535 + if len(data) < 3: + raise varintDecodeError( + 'The first byte of this varint as an integer is %s' + ' but the total length is only %s. It needs to be' + ' at least 3.' 
% (firstByte, len(data))) + encodedValue, = unpack('>H', data[1:3]) + if encodedValue < 253: + raise varintDecodeError( + 'This varint does not encode the value with the lowest' + ' possible number of bytes.') + return (encodedValue, 3) + if firstByte == 254: + # encodes 65536 to 4294967295 + if len(data) < 5: + raise varintDecodeError( + 'The first byte of this varint as an integer is %s' + ' but the total length is only %s. It needs to be' + ' at least 5.' % (firstByte, len(data))) + encodedValue, = unpack('>I', data[1:5]) + if encodedValue < 65536: + raise varintDecodeError( + 'This varint does not encode the value with the lowest' + ' possible number of bytes.') + return (encodedValue, 5) + if firstByte == 255: + # encodes 4294967296 to 18446744073709551615 + if len(data) < 9: + raise varintDecodeError( + 'The first byte of this varint as an integer is %s' + ' but the total length is only %s. It needs to be' + ' at least 9.' % (firstByte, len(data))) + encodedValue, = unpack('>Q', data[1:9]) + if encodedValue < 4294967296: + raise varintDecodeError( + 'This varint does not encode the value with the lowest' + ' possible number of bytes.') + return (encodedValue, 9) + + +def calculateInventoryHash(data): + """Calculate inventory hash from object data""" + sha = hashlib.new('sha512') + sha2 = hashlib.new('sha512') + sha.update(data) + sha2.update(sha.digest()) + return sha2.digest()[0:32] + + +def encodeAddress(version, stream, ripe): + """Convert ripe to address""" + if version >= 2 and version < 4: + if len(ripe) != 20: + raise Exception( + 'Programming error in encodeAddress: The length of' + ' a given ripe hash was not 20.' 
+ ) + + if ripe[:2] == b'\x00\x00': + ripe = ripe[2:] + elif ripe[:1] == b'\x00': + ripe = ripe[1:] + elif version == 4: + if len(ripe) != 20: + raise Exception( + 'Programming error in encodeAddress: The length of' + ' a given ripe hash was not 20.') + ripe = ripe.lstrip(b'\x00') + + storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe + + # Generate the checksum + sha = hashlib.new('sha512') + sha.update(storedBinaryData) + currentHash = sha.digest() + sha = hashlib.new('sha512') + sha.update(currentHash) + checksum = sha.digest()[0:4] + + # FIXME: encodeBase58 should take binary data, to reduce conversions + # encodeBase58(storedBinaryData + checksum) + asInt = int(hexlify(storedBinaryData) + hexlify(checksum), 16) + # should it be str? If yes, it should be everywhere in the code + return 'BM-' + encodeBase58(asInt) + + +def decodeAddress(address): + """ + returns (status, address version number, stream number, + data (almost certainly a ripe hash)) + """ + # pylint: disable=too-many-return-statements,too-many-statements + # pylint: disable=too-many-branches + + address = str(address).strip() + + if address[:3] == 'BM-': + integer = decodeBase58(address[3:]) + else: + integer = decodeBase58(address) + if integer == 0: + status = 'invalidcharacters' + return status, 0, 0, '' + # after converting to hex, the string will be prepended + # with a 0x and appended with a L in python2 + hexdata = hex(integer)[2:].rstrip('L') + + if len(hexdata) % 2 != 0: + hexdata = '0' + hexdata + + data = unhexlify(hexdata) + checksum = data[-4:] + + sha = hashlib.new('sha512') + sha.update(data[:-4]) + currentHash = sha.digest() + sha = hashlib.new('sha512') + sha.update(currentHash) + + if checksum != sha.digest()[0:4]: + status = 'checksumfailed' + return status, 0, 0, '' + + try: + addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9]) + except varintDecodeError as e: + logger.error(str(e)) + status = 'varintmalformed' + return status, 0, 0, '' + 
+ if addressVersionNumber > 4: + logger.error('cannot decode address version numbers this high') + status = 'versiontoohigh' + return status, 0, 0, '' + elif addressVersionNumber == 0: + logger.error('cannot decode address version numbers of zero.') + status = 'versiontoohigh' + return status, 0, 0, '' + + try: + streamNumber, bytesUsedByStreamNumber = \ + decodeVarint(data[bytesUsedByVersionNumber:]) + except varintDecodeError as e: + logger.error(str(e)) + status = 'varintmalformed' + return status, 0, 0, '' + + status = 'success' + if addressVersionNumber == 1: + return status, addressVersionNumber, streamNumber, data[-24:-4] + elif addressVersionNumber == 2 or addressVersionNumber == 3: + embeddedRipeData = \ + data[bytesUsedByVersionNumber + bytesUsedByStreamNumber:-4] + if len(embeddedRipeData) == 19: + return status, addressVersionNumber, streamNumber, \ + b'\x00' + embeddedRipeData + elif len(embeddedRipeData) == 20: + return status, addressVersionNumber, streamNumber, \ + embeddedRipeData + elif len(embeddedRipeData) == 18: + return status, addressVersionNumber, streamNumber, \ + b'\x00\x00' + embeddedRipeData + elif len(embeddedRipeData) < 18: + return 'ripetooshort', 0, 0, '' + elif len(embeddedRipeData) > 20: + return 'ripetoolong', 0, 0, '' + return 'otherproblem', 0, 0, '' + elif addressVersionNumber == 4: + embeddedRipeData = \ + data[bytesUsedByVersionNumber + bytesUsedByStreamNumber:-4] + if embeddedRipeData[0:1] == b'\x00': + # In order to enforce address non-malleability, encoded + # RIPE data must have NULL bytes removed from the front + return 'encodingproblem', 0, 0, '' + elif len(embeddedRipeData) > 20: + return 'ripetoolong', 0, 0, '' + elif len(embeddedRipeData) < 4: + return 'ripetooshort', 0, 0, '' + x00string = b'\x00' * (20 - len(embeddedRipeData)) + return status, addressVersionNumber, streamNumber, \ + x00string + embeddedRipeData + + +def addBMIfNotPresent(address): + """Prepend BM- to an address if it doesn't already have it""" + 
address = str(address).strip() + return address if address[:3] == 'BM-' else 'BM-' + address diff --git a/src/tests/mock/pybitmessage/api.py b/src/tests/mock/pybitmessage/api.py new file mode 100644 index 00000000..de220cc4 --- /dev/null +++ b/src/tests/mock/pybitmessage/api.py @@ -0,0 +1,1537 @@ +# Copyright (c) 2012-2016 Jonathan Warren +# Copyright (c) 2012-2020 The Bitmessage developers + +""" +This is not what you run to start the Bitmessage API. +Instead, `enable the API `_ +and optionally `enable daemon mode `_ +then run the PyBitmessage. + +The PyBitmessage API is provided either as +`XML-RPC `_ or +`JSON-RPC `_ like in bitcoin. +It's selected according to 'apivariant' setting in config file. + +Special value ``apivariant=legacy`` is to mimic the old pre 0.6.3 +behaviour when any results are returned as strings of json. + +.. list-table:: All config settings related to API: + :header-rows: 0 + + * - apienabled = true + - if 'false' the `singleAPI` won't start + * - apiinterface = 127.0.0.1 + - this is the recommended default + * - apiport = 8442 + - the API listens apiinterface:apiport if apiport is not used, + random in range (32767, 65535) otherwise + * - apivariant = xml + - current default for backward compatibility, 'json' is recommended + * - apiusername = username + - set the username + * - apipassword = password + - and the password + * - apinotifypath = + - not really the API setting, this sets a path for the executable to be run + when certain internal event happens + +To use the API consider such simple example: + +.. 
code-block:: python + + import jsonrpclib + + from pybitmessage import bmconfigparser, helper_startup + + helper_startup.loadConfig() # find and load local config file + conf = bmconfigparser.BMConfigParser() + api_uri = "http://%s:%s@127.0.0.1:8442/" % ( + conf.safeGet('bitmessagesettings', 'apiusername'), + conf.safeGet('bitmessagesettings', 'apipassword') + ) + api = jsonrpclib.ServerProxy(api_uri) + print(api.clientStatus()) + + +For further examples please reference `.tests.test_api`. +""" + +import base64 +import ConfigParser +import errno +import hashlib +import httplib +import json +import random # nosec +import socket +import subprocess +import time +import xmlrpclib +from binascii import hexlify, unhexlify +from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer +from struct import pack + +import defaults +import helper_inbox +import helper_sent +import network.stats +import proofofwork +import queues +import shared +import shutdown +import state +from addresses import ( + addBMIfNotPresent, + calculateInventoryHash, + decodeAddress, + decodeVarint, + varintDecodeError +) +from bmconfigparser import BMConfigParser +from debug import logger +from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery, sqlStoredProcedure, sql_ready +from inventory import Inventory +from network.threads import StoppableThread +from six.moves import queue +from version import softwareVersion + +try: # TODO: write tests for XML vulnerabilities + from defusedxml.xmlrpc import monkey_patch +except ImportError: + logger.warning( + 'defusedxml not available, only use API on a secure, closed network.') +else: + monkey_patch() + + +str_chan = '[chan]' +str_broadcast_subscribers = '[Broadcast subscribers]' + + +class ErrorCodes(type): + """Metaclass for :class:`APIError` documenting error codes.""" + _CODES = { + 0: 'Invalid command parameters number', + 1: 'The specified passphrase is blank.', + 2: 'The address version number currently must be 3, 4, or 0' + ' 
(which means auto-select).', + 3: 'The stream number must be 1 (or 0 which means' + ' auto-select). Others aren\'t supported.', + 4: 'Why would you ask me to generate 0 addresses for you?', + 5: 'You have (accidentally?) specified too many addresses to' + ' make. Maximum 999. This check only exists to prevent' + ' mischief; if you really want to create more addresses than' + ' this, contact the Bitmessage developers and we can modify' + ' the check or you can do it yourself by searching the source' + ' code for this message.', + 6: 'The encoding type must be 2 or 3.', + 7: 'Could not decode address', + 8: 'Checksum failed for address', + 9: 'Invalid characters in address', + 10: 'Address version number too high (or zero)', + 11: 'The address version number currently must be 2, 3 or 4.' + ' Others aren\'t supported. Check the address.', + 12: 'The stream number must be 1. Others aren\'t supported.' + ' Check the address.', + 13: 'Could not find this address in your keys.dat file.', + 14: 'Your fromAddress is disabled. Cannot send.', + 15: 'Invalid ackData object size.', + 16: 'You are already subscribed to that address.', + 17: 'Label is not valid UTF-8 data.', + 18: 'Chan name does not match address.', + 19: 'The length of hash should be 32 bytes (encoded in hex' + ' thus 64 characters).', + 20: 'Invalid method:', + 21: 'Unexpected API Failure', + 22: 'Decode error', + 23: 'Bool expected in eighteenByteRipe', + 24: 'Chan address is already present.', + 25: 'Specified address is not a chan address.' + ' Use deleteAddress API call instead.', + 26: 'Malformed varint in address: ', + 27: 'Message is too long.' + } + + def __new__(mcs, name, bases, namespace): + result = super(ErrorCodes, mcs).__new__(mcs, name, bases, namespace) + for code in mcs._CODES.iteritems(): + # beware: the formatting is adjusted for list-table + result.__doc__ += """ * - %04i + - %s + """ % code + return result + + +class APIError(xmlrpclib.Fault): + """ + APIError exception class + + .. 
list-table:: Possible error values + :header-rows: 1 + :widths: auto + + * - Error Number + - Message + """ + __metaclass__ = ErrorCodes + + def __str__(self): + return "API Error %04i: %s" % (self.faultCode, self.faultString) + + +# This thread, of which there is only one, runs the API. +class singleAPI(StoppableThread): + """API thread""" + + name = "singleAPI" + + def stopThread(self): + super(singleAPI, self).stopThread() + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + s.connect(( + BMConfigParser().get('bitmessagesettings', 'apiinterface'), + BMConfigParser().getint('bitmessagesettings', 'apiport') + )) + s.shutdown(socket.SHUT_RDWR) + s.close() + except BaseException: + pass + + def run(self): + """ + The instance of `SimpleXMLRPCServer.SimpleXMLRPCServer` or + :class:`jsonrpclib.SimpleJSONRPCServer` is created and started here + with `BMRPCDispatcher` dispatcher. + """ + port = BMConfigParser().getint('bitmessagesettings', 'apiport') + try: + getattr(errno, 'WSAEADDRINUSE') + except AttributeError: + errno.WSAEADDRINUSE = errno.EADDRINUSE + + RPCServerBase = SimpleXMLRPCServer + ct = 'text/xml' + if BMConfigParser().safeGet( + 'bitmessagesettings', 'apivariant') == 'json': + try: + from jsonrpclib.SimpleJSONRPCServer import ( + SimpleJSONRPCServer as RPCServerBase) + except ImportError: + logger.warning( + 'jsonrpclib not available, failing back to XML-RPC') + else: + ct = 'application/json-rpc' + + # Nested class. FIXME not found a better solution. 
+ class StoppableRPCServer(RPCServerBase): + """A SimpleXMLRPCServer that honours state.shutdown""" + allow_reuse_address = True + content_type = ct + + def serve_forever(self, poll_interval=None): + """Start the RPCServer""" + sql_ready.wait() + while state.shutdown == 0: + self.handle_request() + + for attempt in range(50): + try: + if attempt > 0: + logger.warning( + 'Failed to start API listener on port %s', port) + port = random.randint(32767, 65535) + se = StoppableRPCServer( + (BMConfigParser().get( + 'bitmessagesettings', 'apiinterface'), + port), + BMXMLRPCRequestHandler, True, encoding='UTF-8') + except socket.error as e: + if e.errno in (errno.EADDRINUSE, errno.WSAEADDRINUSE): + continue + else: + if attempt > 0: + logger.warning('Setting apiport to %s', port) + BMConfigParser().set( + 'bitmessagesettings', 'apiport', str(port)) + BMConfigParser().save() + break + + se.register_instance(BMRPCDispatcher()) + se.register_introspection_functions() + + apiNotifyPath = BMConfigParser().safeGet( + 'bitmessagesettings', 'apinotifypath') + + if apiNotifyPath: + logger.info('Trying to call %s', apiNotifyPath) + try: + subprocess.call([apiNotifyPath, "startingUp"]) + except OSError: + logger.warning( + 'Failed to call %s, removing apinotifypath setting', + apiNotifyPath) + BMConfigParser().remove_option( + 'bitmessagesettings', 'apinotifypath') + + se.serve_forever() + + +class CommandHandler(type): + """ + The metaclass for `BMRPCDispatcher` which fills _handlers dict by + methods decorated with @command + """ + def __new__(mcs, name, bases, namespace): + # pylint: disable=protected-access + result = super(CommandHandler, mcs).__new__( + mcs, name, bases, namespace) + result.config = BMConfigParser() + result._handlers = {} + apivariant = result.config.safeGet('bitmessagesettings', 'apivariant') + for func in namespace.values(): + try: + for alias in getattr(func, '_cmd'): + try: + prefix, alias = alias.split(':') + if apivariant != prefix: + continue + except 
ValueError: + pass + result._handlers[alias] = func + except AttributeError: + pass + return result + + +class testmode(object): # pylint: disable=too-few-public-methods + """Decorator to check testmode & route to command decorator""" + + def __init__(self, *aliases): + self.aliases = aliases + + def __call__(self, func): + """Testmode call method""" + + if not state.testmode: + return None + return command(self.aliases[0]).__call__(func) + + +class command(object): # pylint: disable=too-few-public-methods + """Decorator for API command method""" + def __init__(self, *aliases): + self.aliases = aliases + + def __call__(self, func): + + if BMConfigParser().safeGet( + 'bitmessagesettings', 'apivariant') == 'legacy': + def wrapper(*args): + """ + A wrapper for legacy apivariant which dumps the result + into string of json + """ + result = func(*args) + return result if isinstance(result, (int, str)) \ + else json.dumps(result, indent=4) + wrapper.__doc__ = func.__doc__ + else: + wrapper = func + # pylint: disable=protected-access + wrapper._cmd = self.aliases + wrapper.__doc__ = """Commands: *%s* + + """ % ', '.join(self.aliases) + wrapper.__doc__.lstrip() + return wrapper + + +# This is one of several classes that constitute the API +# This class was written by Vaibhav Bhatia. +# Modified by Jonathan Warren (Atheros). +# Further modified by the Bitmessage developers +# http://code.activestate.com/recipes/501148 +class BMXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): + """The main API handler""" + + # pylint: disable=protected-access + def do_POST(self): + """ + Handles the HTTP POST request. + + Attempts to interpret all HTTP POST requests as XML-RPC calls, + which are forwarded to the server's _dispatch method for handling. + + .. 
note:: this method is the same as in + `SimpleXMLRPCServer.SimpleXMLRPCRequestHandler`, + just hacked to handle cookies + """ + + # Check that the path is legal + if not self.is_rpc_path_valid(): + self.report_404() + return + + try: + # Get arguments by reading body of request. + # We read this in chunks to avoid straining + # socket.read(); around the 10 or 15Mb mark, some platforms + # begin to have problems (bug #792570). + max_chunk_size = 10 * 1024 * 1024 + size_remaining = int(self.headers["content-length"]) + L = [] + while size_remaining: + chunk_size = min(size_remaining, max_chunk_size) + L.append(self.rfile.read(chunk_size)) + size_remaining -= len(L[-1]) + data = ''.join(L) + + # pylint: disable=attribute-defined-outside-init + self.cookies = [] + + validuser = self.APIAuthenticateClient() + if not validuser: + time.sleep(2) + self.send_response(httplib.UNAUTHORIZED) + self.end_headers() + return + # "RPC Username or password incorrect or HTTP header" + # " lacks authentication at all." + else: + # In previous versions of SimpleXMLRPCServer, _dispatch + # could be overridden in this class, instead of in + # SimpleXMLRPCDispatcher. To maintain backwards compatibility, + # check to see if a subclass implements _dispatch and dispatch + # using that method if present. 
+ + response = self.server._marshaled_dispatch( + data, getattr(self, '_dispatch', None) + ) + except Exception: # This should only happen if the module is buggy + # internal error, report as HTTP server error + self.send_response(httplib.INTERNAL_SERVER_ERROR) + self.end_headers() + else: + # got a valid XML RPC response + self.send_response(httplib.OK) + self.send_header("Content-type", self.server.content_type) + self.send_header("Content-length", str(len(response))) + + # HACK :start -> sends cookies here + if self.cookies: + for cookie in self.cookies: + self.send_header('Set-Cookie', cookie.output(header='')) + # HACK :end + + self.end_headers() + self.wfile.write(response) + + # shut down the connection + self.wfile.flush() + self.connection.shutdown(1) + + # actually handle shutdown command after sending response + if state.shutdown is False: + shutdown.doCleanShutdown() + + def APIAuthenticateClient(self): + """ + Predicate to check for valid API credentials in the request header + """ + + if 'Authorization' in self.headers: + # handle Basic authentication + encstr = self.headers.get('Authorization').split()[1] + emailid, password = encstr.decode('base64').split(':') + return ( + emailid == BMConfigParser().get( + 'bitmessagesettings', 'apiusername' + ) and password == BMConfigParser().get( + 'bitmessagesettings', 'apipassword')) + else: + logger.warning( + 'Authentication failed because header lacks' + ' Authentication field') + time.sleep(2) + + return False + + +# pylint: disable=no-self-use,no-member,too-many-public-methods +class BMRPCDispatcher(object): + """This class is used to dispatch API commands""" + __metaclass__ = CommandHandler + + @staticmethod + def _decode(text, decode_type): + try: + if decode_type == 'hex': + return unhexlify(text) + elif decode_type == 'base64': + return base64.b64decode(text) + except Exception as e: + raise APIError( + 22, 'Decode error - %s. 
Had trouble while decoding string: %r' + % (e, text) + ) + + def _verifyAddress(self, address): + status, addressVersionNumber, streamNumber, ripe = \ + decodeAddress(address) + if status != 'success': + if status == 'checksumfailed': + raise APIError(8, 'Checksum failed for address: ' + address) + if status == 'invalidcharacters': + raise APIError(9, 'Invalid characters in address: ' + address) + if status == 'versiontoohigh': + raise APIError( + 10, 'Address version number too high (or zero) in address: ' + + address) + if status == 'varintmalformed': + raise APIError(26, 'Malformed varint in address: ' + address) + raise APIError( + 7, 'Could not decode address: %s : %s' % (address, status)) + if addressVersionNumber < 2 or addressVersionNumber > 4: + raise APIError( + 11, 'The address version number currently must be 2, 3 or 4.' + ' Others aren\'t supported. Check the address.' + ) + if streamNumber != 1: + raise APIError( + 12, 'The stream number must be 1. Others aren\'t supported.' + ' Check the address.' 
+ ) + + return { + 'status': status, + 'addressVersion': addressVersionNumber, + 'streamNumber': streamNumber, + 'ripe': base64.b64encode(ripe) + } if self._method == 'decodeAddress' else ( + status, addressVersionNumber, streamNumber, ripe) + + @staticmethod + def _dump_inbox_message( # pylint: disable=too-many-arguments + msgid, toAddress, fromAddress, subject, received, + message, encodingtype, read): + subject = shared.fixPotentiallyInvalidUTF8Data(subject) + message = shared.fixPotentiallyInvalidUTF8Data(message) + return { + 'msgid': hexlify(msgid), + 'toAddress': toAddress, + 'fromAddress': fromAddress, + 'subject': base64.b64encode(subject), + 'message': base64.b64encode(message), + 'encodingType': encodingtype, + 'receivedTime': received, + 'read': read + } + + @staticmethod + def _dump_sent_message( # pylint: disable=too-many-arguments + msgid, toAddress, fromAddress, subject, lastactiontime, + message, encodingtype, status, ackdata): + subject = shared.fixPotentiallyInvalidUTF8Data(subject) + message = shared.fixPotentiallyInvalidUTF8Data(message) + return { + 'msgid': hexlify(msgid), + 'toAddress': toAddress, + 'fromAddress': fromAddress, + 'subject': base64.b64encode(subject), + 'message': base64.b64encode(message), + 'encodingType': encodingtype, + 'lastActionTime': lastactiontime, + 'status': status, + 'ackData': hexlify(ackdata) + } + + # Request Handlers + + @command('decodeAddress') + def HandleDecodeAddress(self, address): + """ + Decode given address and return dict with + status, addressVersion, streamNumber and ripe keys + """ + return self._verifyAddress(address) + + @command('listAddresses', 'listAddresses2') + def HandleListAddresses(self): + """ + Returns dict with a list of all used addresses with their properties + in the *addresses* key. 
+ """ + data = [] + for address in self.config.addresses(): + streamNumber = decodeAddress(address)[2] + label = self.config.get(address, 'label') + if self._method == 'listAddresses2': + label = base64.b64encode(label) + data.append({ + 'label': label, + 'address': address, + 'stream': streamNumber, + 'enabled': self.config.safeGetBoolean(address, 'enabled'), + 'chan': self.config.safeGetBoolean(address, 'chan') + }) + return {'addresses': data} + + # the listAddressbook alias should be removed eventually. + @command('listAddressBookEntries', 'legacy:listAddressbook') + def HandleListAddressBookEntries(self, label=None): + """ + Returns dict with a list of all address book entries (address and label) + in the *addresses* key. + """ + queryreturn = sqlQuery( + "SELECT label, address from addressbook WHERE label = ?", + label + ) if label else sqlQuery("SELECT label, address from addressbook") + data = [] + for label, address in queryreturn: + label = shared.fixPotentiallyInvalidUTF8Data(label) + data.append({ + 'label': base64.b64encode(label), + 'address': address + }) + return {'addresses': data} + + # the addAddressbook alias should be deleted eventually. + @command('addAddressBookEntry', 'legacy:addAddressbook') + def HandleAddAddressBookEntry(self, address, label): + """Add an entry to address book. 
label must be base64 encoded.""" + label = self._decode(label, "base64") + address = addBMIfNotPresent(address) + self._verifyAddress(address) + # TODO: add unique together constraint in the table + queryreturn = sqlQuery( + "SELECT address FROM addressbook WHERE address=?", address) + if queryreturn != []: + raise APIError( + 16, 'You already have this address in your address book.') + + sqlExecute("INSERT INTO addressbook VALUES(?,?)", label, address) + queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) + queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) + queues.UISignalQueue.put(('rerenderAddressBook', '')) + return "Added address %s to address book" % address + + # the deleteAddressbook alias should be deleted eventually. + @command('deleteAddressBookEntry', 'legacy:deleteAddressbook') + def HandleDeleteAddressBookEntry(self, address): + """Delete an entry from address book.""" + address = addBMIfNotPresent(address) + self._verifyAddress(address) + sqlExecute('DELETE FROM addressbook WHERE address=?', address) + queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) + queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) + queues.UISignalQueue.put(('rerenderAddressBook', '')) + return "Deleted address book entry for %s if it existed" % address + + @command('createRandomAddress') + def HandleCreateRandomAddress( + self, label, eighteenByteRipe=False, totalDifficulty=0, + smallMessageDifficulty=0 + ): + """ + Create one address using the random number generator. + + :param str label: base64 encoded label for the address + :param bool eighteenByteRipe: is telling Bitmessage whether to + generate an address with an 18 byte RIPE hash + (as opposed to a 19 byte hash). 
+ """ + + nonceTrialsPerByte = self.config.get( + 'bitmessagesettings', 'defaultnoncetrialsperbyte' + ) if not totalDifficulty else int( + defaults.networkDefaultProofOfWorkNonceTrialsPerByte + * totalDifficulty) + payloadLengthExtraBytes = self.config.get( + 'bitmessagesettings', 'defaultpayloadlengthextrabytes' + ) if not smallMessageDifficulty else int( + defaults.networkDefaultPayloadLengthExtraBytes + * smallMessageDifficulty) + + if not isinstance(eighteenByteRipe, bool): + raise APIError( + 23, 'Bool expected in eighteenByteRipe, saw %s instead' + % type(eighteenByteRipe)) + label = self._decode(label, "base64") + try: + label.decode('utf-8') + except UnicodeDecodeError: + raise APIError(17, 'Label is not valid UTF-8 data.') + queues.apiAddressGeneratorReturnQueue.queue.clear() + # FIXME hard coded stream no + streamNumberForAddress = 1 + queues.addressGeneratorQueue.put(( + 'createRandomAddress', 4, streamNumberForAddress, label, 1, "", + eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes + )) + return queues.apiAddressGeneratorReturnQueue.get() + + # pylint: disable=too-many-arguments + @command('createDeterministicAddresses') + def HandleCreateDeterministicAddresses( + self, passphrase, numberOfAddresses=1, addressVersionNumber=0, + streamNumber=0, eighteenByteRipe=False, totalDifficulty=0, + smallMessageDifficulty=0 + ): + """ + Create many addresses deterministically using the passphrase. + + :param str passphrase: base64 encoded passphrase + :param int numberOfAddresses: number of addresses to create, + up to 999 + + *addressVersionNumber* and *streamNumber* may be set to 0 + which will tell Bitmessage to use the most up-to-date + address version and the most available stream. 
+ """ + + nonceTrialsPerByte = self.config.get( + 'bitmessagesettings', 'defaultnoncetrialsperbyte' + ) if not totalDifficulty else int( + defaults.networkDefaultProofOfWorkNonceTrialsPerByte + * totalDifficulty) + payloadLengthExtraBytes = self.config.get( + 'bitmessagesettings', 'defaultpayloadlengthextrabytes' + ) if not smallMessageDifficulty else int( + defaults.networkDefaultPayloadLengthExtraBytes + * smallMessageDifficulty) + + if not passphrase: + raise APIError(1, 'The specified passphrase is blank.') + if not isinstance(eighteenByteRipe, bool): + raise APIError( + 23, 'Bool expected in eighteenByteRipe, saw %s instead' + % type(eighteenByteRipe)) + passphrase = self._decode(passphrase, "base64") + # 0 means "just use the proper addressVersionNumber" + if addressVersionNumber == 0: + addressVersionNumber = 4 + if addressVersionNumber not in (3, 4): + raise APIError( + 2, 'The address version number currently must be 3, 4, or 0' + ' (which means auto-select). %i isn\'t supported.' + % addressVersionNumber) + if streamNumber == 0: # 0 means "just use the most available stream" + streamNumber = 1 # FIXME hard coded stream no + if streamNumber != 1: + raise APIError( + 3, 'The stream number must be 1 (or 0 which means' + ' auto-select). Others aren\'t supported.') + if numberOfAddresses == 0: + raise APIError( + 4, 'Why would you ask me to generate 0 addresses for you?') + if numberOfAddresses > 999: + raise APIError( + 5, 'You have (accidentally?) specified too many addresses to' + ' make. Maximum 999. 
This check only exists to prevent' + ' mischief; if you really want to create more addresses than' + ' this, contact the Bitmessage developers and we can modify' + ' the check or you can do it yourself by searching the source' + ' code for this message.') + queues.apiAddressGeneratorReturnQueue.queue.clear() + logger.debug( + 'Requesting that the addressGenerator create %s addresses.', + numberOfAddresses) + queues.addressGeneratorQueue.put(( + 'createDeterministicAddresses', addressVersionNumber, streamNumber, + 'unused API address', numberOfAddresses, passphrase, + eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes + )) + + return {'addresses': queues.apiAddressGeneratorReturnQueue.get()} + + @command('getDeterministicAddress') + def HandleGetDeterministicAddress( + self, passphrase, addressVersionNumber, streamNumber): + """ + Similar to *createDeterministicAddresses* except that the one + address that is returned will not be added to the Bitmessage + user interface or the keys.dat file. + """ + + numberOfAddresses = 1 + eighteenByteRipe = False + if not passphrase: + raise APIError(1, 'The specified passphrase is blank.') + passphrase = self._decode(passphrase, "base64") + if addressVersionNumber not in (3, 4): + raise APIError( + 2, 'The address version number currently must be 3 or 4. %i' + ' isn\'t supported.' % addressVersionNumber) + if streamNumber != 1: + raise APIError( + 3, ' The stream number must be 1. Others aren\'t supported.') + queues.apiAddressGeneratorReturnQueue.queue.clear() + logger.debug( + 'Requesting that the addressGenerator create %s addresses.', + numberOfAddresses) + queues.addressGeneratorQueue.put(( + 'getDeterministicAddress', addressVersionNumber, streamNumber, + 'unused API address', numberOfAddresses, passphrase, + eighteenByteRipe + )) + return queues.apiAddressGeneratorReturnQueue.get() + + @command('createChan') + def HandleCreateChan(self, passphrase): + """ + Creates a new chan. passphrase must be base64 encoded. 
+ Returns the corresponding Bitmessage address. + """ + + passphrase = self._decode(passphrase, "base64") + if not passphrase: + raise APIError(1, 'The specified passphrase is blank.') + # It would be nice to make the label the passphrase but it is + # possible that the passphrase contains non-utf-8 characters. + try: + passphrase.decode('utf-8') + label = str_chan + ' ' + passphrase + except UnicodeDecodeError: + label = str_chan + ' ' + repr(passphrase) + + addressVersionNumber = 4 + streamNumber = 1 + queues.apiAddressGeneratorReturnQueue.queue.clear() + logger.debug( + 'Requesting that the addressGenerator create chan %s.', passphrase) + queues.addressGeneratorQueue.put(( + 'createChan', addressVersionNumber, streamNumber, label, + passphrase, True + )) + queueReturn = queues.apiAddressGeneratorReturnQueue.get() + try: + return queueReturn[0] + except IndexError: + raise APIError(24, 'Chan address is already present.') + + @command('joinChan') + def HandleJoinChan(self, passphrase, suppliedAddress): + """ + Join a chan. passphrase must be base64 encoded. Returns 'success'. + """ + + passphrase = self._decode(passphrase, "base64") + if not passphrase: + raise APIError(1, 'The specified passphrase is blank.') + # It would be nice to make the label the passphrase but it is + # possible that the passphrase contains non-utf-8 characters. 
+ try: + passphrase.decode('utf-8') + label = str_chan + ' ' + passphrase + except UnicodeDecodeError: + label = str_chan + ' ' + repr(passphrase) + + self._verifyAddress(suppliedAddress) + suppliedAddress = addBMIfNotPresent(suppliedAddress) + queues.apiAddressGeneratorReturnQueue.queue.clear() + queues.addressGeneratorQueue.put(( + 'joinChan', suppliedAddress, label, passphrase, True + )) + queueReturn = queues.apiAddressGeneratorReturnQueue.get() + try: + if queueReturn[0] == 'chan name does not match address': + raise APIError(18, 'Chan name does not match address.') + except IndexError: + raise APIError(24, 'Chan address is already present.') + + return "success" + + @command('leaveChan') + def HandleLeaveChan(self, address): + """ + Leave a chan. Returns 'success'. + + .. note:: at this time, the address is still shown in the UI + until a restart. + """ + self._verifyAddress(address) + address = addBMIfNotPresent(address) + if not self.config.safeGetBoolean(address, 'chan'): + raise APIError( + 25, 'Specified address is not a chan address.' + ' Use deleteAddress API call instead.') + try: + self.config.remove_section(address) + except ConfigParser.NoSectionError: + raise APIError( + 13, 'Could not find this address in your keys.dat file.') + self.config.save() + queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) + queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) + return "success" + + @command('deleteAddress') + def HandleDeleteAddress(self, address): + """ + Permanently delete the address from keys.dat file. Returns 'success'. 
+ """ + self._verifyAddress(address) + address = addBMIfNotPresent(address) + try: + self.config.remove_section(address) + except ConfigParser.NoSectionError: + raise APIError( + 13, 'Could not find this address in your keys.dat file.') + self.config.save() + queues.UISignalQueue.put(('writeNewAddressToTable', ('', '', ''))) + shared.reloadMyAddressHashes() + return "success" + + @command('getAllInboxMessages') + def HandleGetAllInboxMessages(self): + """ + Returns a dict with all inbox messages in the *inboxMessages* key. + The message is a dict with such keys: + *msgid*, *toAddress*, *fromAddress*, *subject*, *message*, + *encodingType*, *receivedTime*, *read*. + *msgid* is hex encoded string. + *subject* and *message* are base64 encoded. + """ + + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, received, message," + " encodingtype, read FROM inbox WHERE folder='inbox'" + " ORDER BY received" + ) + return {"inboxMessages": [ + self._dump_inbox_message(*data) for data in queryreturn + ]} + + @command('getAllInboxMessageIds', 'getAllInboxMessageIDs') + def HandleGetAllInboxMessageIds(self): + """ + The same as *getAllInboxMessages* but returns only *msgid*s, + result key - *inboxMessageIds*. + """ + + queryreturn = sqlQuery( + "SELECT msgid FROM inbox where folder='inbox' ORDER BY received") + + return {"inboxMessageIds": [ + {'msgid': hexlify(msgid)} for msgid, in queryreturn + ]} + + @command('getInboxMessageById', 'getInboxMessageByID') + def HandleGetInboxMessageById(self, hid, readStatus=None): + """ + Returns a dict with list containing single message in the result + key *inboxMessage*. May also return None if message was not found. + + :param str hid: hex encoded msgid + :param bool readStatus: sets the message's read status if present + """ + + msgid = self._decode(hid, "hex") + if readStatus is not None: + if not isinstance(readStatus, bool): + raise APIError( + 23, 'Bool expected in readStatus, saw %s instead.' 
+ % type(readStatus)) + queryreturn = sqlQuery( + "SELECT read FROM inbox WHERE msgid=?", msgid) + # UPDATE is slow, only update if status is different + try: + if (queryreturn[0][0] == 1) != readStatus: + sqlExecute( + "UPDATE inbox set read = ? WHERE msgid=?", + readStatus, msgid) + queues.UISignalQueue.put(('changedInboxUnread', None)) + except IndexError: + pass + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, received, message," + " encodingtype, read FROM inbox WHERE msgid=?", msgid + ) + try: + return {"inboxMessage": [ + self._dump_inbox_message(*queryreturn[0])]} + except IndexError: + pass # FIXME inconsistent + + @command('getAllSentMessages') + def HandleGetAllSentMessages(self): + """ + The same as *getAllInboxMessages* but for sent, + result key - *sentMessages*. Message dict keys are: + *msgid*, *toAddress*, *fromAddress*, *subject*, *message*, + *encodingType*, *lastActionTime*, *status*, *ackData*. + *ackData* is also a hex encoded string. + """ + + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," + " message, encodingtype, status, ackdata FROM sent" + " WHERE folder='sent' ORDER BY lastactiontime" + ) + return {"sentMessages": [ + self._dump_sent_message(*data) for data in queryreturn + ]} + + @command('getAllSentMessageIds', 'getAllSentMessageIDs') + def HandleGetAllSentMessageIds(self): + """ + The same as *getAllInboxMessageIds* but for sent, + result key - *sentMessageIds*. + """ + + queryreturn = sqlQuery( + "SELECT msgid FROM sent WHERE folder='sent'" + " ORDER BY lastactiontime" + ) + return {"sentMessageIds": [ + {'msgid': hexlify(msgid)} for msgid, in queryreturn + ]} + + # after some time getInboxMessagesByAddress should be removed + @command('getInboxMessagesByReceiver', 'legacy:getInboxMessagesByAddress') + def HandleInboxMessagesByReceiver(self, toAddress): + """ + The same as *getAllInboxMessages* but returns only messages + for toAddress. 
+ """ + + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, received," + " message, encodingtype, read FROM inbox WHERE folder='inbox'" + " AND toAddress=?", toAddress) + return {"inboxMessages": [ + self._dump_inbox_message(*data) for data in queryreturn + ]} + + @command('getSentMessageById', 'getSentMessageByID') + def HandleGetSentMessageById(self, hid): + """ + Similiar to *getInboxMessageById* but doesn't change message's + read status (sent messages have no such field). + Result key is *sentMessage* + """ + + msgid = self._decode(hid, "hex") + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," + " message, encodingtype, status, ackdata FROM sent WHERE msgid=?", + msgid + ) + try: + return {"sentMessage": [ + self._dump_sent_message(*queryreturn[0]) + ]} + except IndexError: + pass # FIXME inconsistent + + @command('getSentMessagesByAddress', 'getSentMessagesBySender') + def HandleGetSentMessagesByAddress(self, fromAddress): + """ + The same as *getAllSentMessages* but returns only messages + from fromAddress. + """ + + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," + " message, encodingtype, status, ackdata FROM sent" + " WHERE folder='sent' AND fromAddress=? ORDER BY lastactiontime", + fromAddress + ) + return {"sentMessages": [ + self._dump_sent_message(*data) for data in queryreturn + ]} + + @command('getSentMessageByAckData') + def HandleGetSentMessagesByAckData(self, ackData): + """ + Similiar to *getSentMessageById* but searches by ackdata + (also hex encoded). 
+ """ + + ackData = self._decode(ackData, "hex") + queryreturn = sqlQuery( + "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," + " message, encodingtype, status, ackdata FROM sent" + " WHERE ackdata=?", ackData + ) + + try: + return {"sentMessage": [ + self._dump_sent_message(*queryreturn[0]) + ]} + except IndexError: + pass # FIXME inconsistent + + @command('trashMessage') + def HandleTrashMessage(self, msgid): + """ + Trash message by msgid (encoded in hex). Returns a simple message + saying that the message was trashed assuming it ever even existed. + Prior existence is not checked. + """ + msgid = self._decode(msgid, "hex") + # Trash if in inbox table + helper_inbox.trash(msgid) + # Trash if in sent table + sqlExecute("UPDATE sent SET folder='trash' WHERE msgid=?", msgid) + return 'Trashed message (assuming message existed).' + + @command('trashInboxMessage') + def HandleTrashInboxMessage(self, msgid): + """Trash inbox message by msgid (encoded in hex).""" + msgid = self._decode(msgid, "hex") + helper_inbox.trash(msgid) + return 'Trashed inbox message (assuming message existed).' + + @command('trashSentMessage') + def HandleTrashSentMessage(self, msgid): + """Trash sent message by msgid (encoded in hex).""" + msgid = self._decode(msgid, "hex") + sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid) + return 'Trashed sent message (assuming message existed).' + + @command('sendMessage') + def HandleSendMessage( + self, toAddress, fromAddress, subject, message, + encodingType=2, TTL=4 * 24 * 60 * 60 + ): + """ + Send the message and return ackdata (hex encoded string). + subject and message must be encoded in base64 which may optionally + include line breaks. TTL is specified in seconds; values outside + the bounds of 3600 to 2419200 will be moved to be within those + bounds. TTL defaults to 4 days. 
+ """ + # pylint: disable=too-many-locals + if encodingType not in (2, 3): + raise APIError(6, 'The encoding type must be 2 or 3.') + subject = self._decode(subject, "base64") + message = self._decode(message, "base64") + if len(subject + message) > (2 ** 18 - 500): + raise APIError(27, 'Message is too long.') + if TTL < 60 * 60: + TTL = 60 * 60 + if TTL > 28 * 24 * 60 * 60: + TTL = 28 * 24 * 60 * 60 + toAddress = addBMIfNotPresent(toAddress) + fromAddress = addBMIfNotPresent(fromAddress) + self._verifyAddress(fromAddress) + try: + fromAddressEnabled = self.config.getboolean( + fromAddress, 'enabled') + except BaseException: + raise APIError( + 13, 'Could not find your fromAddress in the keys.dat file.') + if not fromAddressEnabled: + raise APIError(14, 'Your fromAddress is disabled. Cannot send.') + + ackdata = helper_sent.insert( + toAddress=toAddress, fromAddress=fromAddress, + subject=subject, message=message, encoding=encodingType, ttl=TTL) + + toLabel = '' + queryreturn = sqlQuery( + "SELECT label FROM addressbook WHERE address=?", toAddress) + try: + toLabel = queryreturn[0][0] + except IndexError: + pass + + queues.UISignalQueue.put(('displayNewSentMessage', ( + toAddress, toLabel, fromAddress, subject, message, ackdata))) + queues.workerQueue.put(('sendmessage', toAddress)) + + return hexlify(ackdata) + + @command('sendBroadcast') + def HandleSendBroadcast( + self, fromAddress, subject, message, encodingType=2, + TTL=4 * 24 * 60 * 60): + """Send the broadcast message. 
Similar to *sendMessage*.
label must be base64 encoded.""" + + if label: + label = self._decode(label, "base64") + try: + label.decode('utf-8') + except UnicodeDecodeError: + raise APIError(17, 'Label is not valid UTF-8 data.') + self._verifyAddress(address) + address = addBMIfNotPresent(address) + # First we must check to see if the address is already in the + # subscriptions list. + queryreturn = sqlQuery( + "SELECT * FROM subscriptions WHERE address=?", address) + if queryreturn: + raise APIError(16, 'You are already subscribed to that address.') + sqlExecute( + "INSERT INTO subscriptions VALUES (?,?,?)", label, address, True) + shared.reloadBroadcastSendersForWhichImWatching() + queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) + queues.UISignalQueue.put(('rerenderSubscriptions', '')) + return 'Added subscription.' + + @command('deleteSubscription') + def HandleDeleteSubscription(self, address): + """ + Unsubscribe from the address. The program does not check whether + you were subscribed in the first place. + """ + + address = addBMIfNotPresent(address) + sqlExecute("DELETE FROM subscriptions WHERE address=?", address) + shared.reloadBroadcastSendersForWhichImWatching() + queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) + queues.UISignalQueue.put(('rerenderSubscriptions', '')) + return 'Deleted subscription if it existed.' + + @command('listSubscriptions') + def ListSubscriptions(self): + """ + Returns dict with a list of all subscriptions + in the *subscriptions* key. 
+ """ + + queryreturn = sqlQuery( + "SELECT label, address, enabled FROM subscriptions") + data = [] + for label, address, enabled in queryreturn: + label = shared.fixPotentiallyInvalidUTF8Data(label) + data.append({ + 'label': base64.b64encode(label), + 'address': address, + 'enabled': enabled == 1 + }) + return {'subscriptions': data} + + @command('disseminatePreEncryptedMsg') + def HandleDisseminatePreEncryptedMsg( + self, encryptedPayload, requiredAverageProofOfWorkNonceTrialsPerByte, + requiredPayloadLengthExtraBytes): + """Handle a request to disseminate an encrypted message""" + + # The device issuing this command to PyBitmessage supplies a msg + # object that has already been encrypted but which still needs the POW + # to be done. PyBitmessage accepts this msg object and sends it out + # to the rest of the Bitmessage network as if it had generated + # the message itself. Please do not yet add this to the api doc. + encryptedPayload = self._decode(encryptedPayload, "hex") + # Let us do the POW and attach it to the front + target = 2**64 / ( + ( + len(encryptedPayload) + + requiredPayloadLengthExtraBytes + + 8 + ) * requiredAverageProofOfWorkNonceTrialsPerByte) + logger.info( + '(For msg message via API) Doing proof of work. Total required' + ' difficulty: %s\nRequired small message difficulty: %s', + float(requiredAverageProofOfWorkNonceTrialsPerByte) + / defaults.networkDefaultProofOfWorkNonceTrialsPerByte, + float(requiredPayloadLengthExtraBytes) + / defaults.networkDefaultPayloadLengthExtraBytes, + ) + powStartTime = time.time() + initialHash = hashlib.sha512(encryptedPayload).digest() + trialValue, nonce = proofofwork.run(target, initialHash) + logger.info( + '(For msg message via API) Found proof of work %s\nNonce: %s\n' + 'POW took %s seconds. 
%s nonce trials per second.', + trialValue, nonce, int(time.time() - powStartTime), + nonce / (time.time() - powStartTime) + ) + encryptedPayload = pack('>Q', nonce) + encryptedPayload + toStreamNumber = decodeVarint(encryptedPayload[16:26])[0] + inventoryHash = calculateInventoryHash(encryptedPayload) + objectType = 2 + TTL = 2.5 * 24 * 60 * 60 + Inventory()[inventoryHash] = ( + objectType, toStreamNumber, encryptedPayload, + int(time.time()) + TTL, '' + ) + logger.info( + 'Broadcasting inv for msg(API disseminatePreEncryptedMsg' + ' command): %s', hexlify(inventoryHash)) + queues.invQueue.put((toStreamNumber, inventoryHash)) + + @command('trashSentMessageByAckData') + def HandleTrashSentMessageByAckDAta(self, ackdata): + """Trash a sent message by ackdata (hex encoded)""" + # This API method should only be used when msgid is not available + ackdata = self._decode(ackdata, "hex") + sqlExecute("UPDATE sent SET folder='trash' WHERE ackdata=?", ackdata) + return 'Trashed sent message (assuming message existed).' + + @command('disseminatePubkey') + def HandleDissimatePubKey(self, payload): + """Handle a request to disseminate a public key""" + + # The device issuing this command to PyBitmessage supplies a pubkey + # object to be disseminated to the rest of the Bitmessage network. + # PyBitmessage accepts this pubkey object and sends it out to the rest + # of the Bitmessage network as if it had generated the pubkey object + # itself. Please do not yet add this to the api doc. 
+ payload = self._decode(payload, "hex") + + # Let us do the POW + target = 2 ** 64 / (( + len(payload) + defaults.networkDefaultPayloadLengthExtraBytes + 8 + ) * defaults.networkDefaultProofOfWorkNonceTrialsPerByte) + logger.info('(For pubkey message via API) Doing proof of work...') + initialHash = hashlib.sha512(payload).digest() + trialValue, nonce = proofofwork.run(target, initialHash) + logger.info( + '(For pubkey message via API) Found proof of work %s Nonce: %s', + trialValue, nonce + ) + payload = pack('>Q', nonce) + payload + + pubkeyReadPosition = 8 # bypass the nonce + if payload[pubkeyReadPosition:pubkeyReadPosition + 4] == \ + '\x00\x00\x00\x00': # if this pubkey uses 8 byte time + pubkeyReadPosition += 8 + else: + pubkeyReadPosition += 4 + addressVersionLength = decodeVarint( + payload[pubkeyReadPosition:pubkeyReadPosition + 10])[1] + pubkeyReadPosition += addressVersionLength + pubkeyStreamNumber = decodeVarint( + payload[pubkeyReadPosition:pubkeyReadPosition + 10])[0] + inventoryHash = calculateInventoryHash(payload) + objectType = 1 # .. todo::: support v4 pubkeys + TTL = 28 * 24 * 60 * 60 + Inventory()[inventoryHash] = ( + objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL, '' + ) + logger.info( + 'broadcasting inv within API command disseminatePubkey with' + ' hash: %s', hexlify(inventoryHash)) + queues.invQueue.put((pubkeyStreamNumber, inventoryHash)) + + @command( + 'getMessageDataByDestinationHash', 'getMessageDataByDestinationTag') + def HandleGetMessageDataByDestinationHash(self, requestedHash): + """Handle a request to get message data by destination hash""" + + # Method will eventually be used by a particular Android app to + # select relevant messages. Do not yet add this to the api + # doc. 
+ if len(requestedHash) != 32: + raise APIError( + 19, 'The length of hash should be 32 bytes (encoded in hex' + ' thus 64 characters).') + requestedHash = self._decode(requestedHash, "hex") + + # This is not a particularly commonly used API function. Before we + # use it we'll need to fill out a field in our inventory database + # which is blank by default (first20bytesofencryptedmessage). + queryreturn = sqlQuery( + "SELECT hash, payload FROM inventory WHERE tag = ''" + " and objecttype = 2") + with SqlBulkExecute() as sql: + for hash01, payload in queryreturn: + readPosition = 16 # Nonce length + time length + # Stream Number length + readPosition += decodeVarint( + payload[readPosition:readPosition + 10])[1] + t = (payload[readPosition:readPosition + 32], hash01) + sql.execute("UPDATE inventory SET tag=? WHERE hash=?", *t) + + queryreturn = sqlQuery( + "SELECT payload FROM inventory WHERE tag = ?", requestedHash) + return {"receivedMessageDatas": [ + {'data': hexlify(payload)} for payload, in queryreturn + ]} + + @command('clientStatus') + def HandleClientStatus(self): + """ + Returns the bitmessage status as dict with keys *networkConnections*, + *numberOfMessagesProcessed*, *numberOfBroadcastsProcessed*, + *numberOfPubkeysProcessed*, *pendingDownload*, *networkStatus*, + *softwareName*, *softwareVersion*. *networkStatus* will be one of + these strings: "notConnected", + "connectedButHaveNotReceivedIncomingConnections", + or "connectedAndReceivingIncomingConnections". 
+ """ + + connections_num = len(network.stats.connectedHostsList()) + if connections_num == 0: + networkStatus = 'notConnected' + elif state.clientHasReceivedIncomingConnections: + networkStatus = 'connectedAndReceivingIncomingConnections' + else: + networkStatus = 'connectedButHaveNotReceivedIncomingConnections' + return { + 'networkConnections': connections_num, + 'numberOfMessagesProcessed': state.numberOfMessagesProcessed, + 'numberOfBroadcastsProcessed': state.numberOfBroadcastsProcessed, + 'numberOfPubkeysProcessed': state.numberOfPubkeysProcessed, + 'pendingDownload': network.stats.pendingDownload(), + 'networkStatus': networkStatus, + 'softwareName': 'PyBitmessage', + 'softwareVersion': softwareVersion + } + + @command('helloWorld') + def HandleHelloWorld(self, a, b): + """Test two string params""" + return a + '-' + b + + @command('add') + def HandleAdd(self, a, b): + """Test two numeric params""" + return a + b + + @testmode('clearUISignalQueue') + def HandleclearUISignalQueue(self): + """clear UISignalQueue""" + queues.UISignalQueue.queue.clear() + return "success" + + @command('statusBar') + def HandleStatusBar(self, message): + """Update GUI statusbar message""" + queues.UISignalQueue.put(('updateStatusBar', message)) + + @testmode('getStatusBar') + def HandleGetStatusBar(self): + """Get GUI statusbar message""" + try: + _, data = queues.UISignalQueue.get(block=False) + except queue.Empty: + return None + return data + + @testmode('undeleteMessage') + def HandleUndeleteMessage(self, msgid): + """Undelete message""" + msgid = self._decode(msgid, "hex") + helper_inbox.undeleteMessage(msgid) + return "Undeleted message" + + @command('deleteAndVacuum') + def HandleDeleteAndVacuum(self): + """Cleanup trashes and vacuum messages database""" + sqlStoredProcedure('deleteandvacuume') + return 'done' + + @command('shutdown') + def HandleShutdown(self): + """Shutdown the bitmessage. 
Returns 'done'.""" + # backward compatible trick because False == 0 is True + state.shutdown = False + return 'done' + + def _handle_request(self, method, params): + try: + # pylint: disable=attribute-defined-outside-init + self._method = method + func = self._handlers[method] + return func(self, *params) + except KeyError: + raise APIError(20, 'Invalid method: %s' % method) + except TypeError as e: + msg = 'Unexpected API Failure - %s' % e + if 'argument' not in str(e): + raise APIError(21, msg) + argcount = len(params) + maxcount = func.func_code.co_argcount + if argcount > maxcount: + msg = ( + 'Command %s takes at most %s parameters (%s given)' + % (method, maxcount, argcount)) + else: + mincount = maxcount - len(func.func_defaults or []) + if argcount < mincount: + msg = ( + 'Command %s takes at least %s parameters (%s given)' + % (method, mincount, argcount)) + raise APIError(0, msg) + finally: + state.last_api_response = time.time() + + def _dispatch(self, method, params): + _fault = None + + try: + return self._handle_request(method, params) + except APIError as e: + _fault = e + except varintDecodeError as e: + logger.error(e) + _fault = APIError( + 26, 'Data contains a malformed varint. 
Some details: %s' % e) + except Exception as e: + logger.exception(e) + _fault = APIError(21, 'Unexpected API Failure - %s' % e) + + if _fault: + if self.config.safeGet( + 'bitmessagesettings', 'apivariant') == 'legacy': + return str(_fault) + else: + raise _fault # pylint: disable=raising-bad-type + + def _listMethods(self): + """List all API commands""" + return self._handlers.keys() + + def _methodHelp(self, method): + return self._handlers[method].__doc__ diff --git a/src/tests/mock/pybitmessage/bitmessagecli.py b/src/tests/mock/pybitmessage/bitmessagecli.py new file mode 100644 index 00000000..adcab8b1 --- /dev/null +++ b/src/tests/mock/pybitmessage/bitmessagecli.py @@ -0,0 +1,1887 @@ +#!/usr/bin/python2.7 +# -*- coding: utf-8 -*- +# pylint: disable=too-many-lines,global-statement,too-many-branches,too-many-statements,inconsistent-return-statements +# pylint: disable=too-many-nested-blocks,too-many-locals,protected-access,too-many-arguments,too-many-function-args +# pylint: disable=no-member +""" +Created by Adam Melton (.dok) referenceing https://bitmessage.org/wiki/API_Reference for API documentation +Distributed under the MIT/X11 software license. See http://www.opensource.org/licenses/mit-license.php. + +This is an example of a daemon client for PyBitmessage 0.6.2, by .dok (Version 0.3.1) , modified + +TODO: fix the following (currently ignored) violations: + +""" + +import datetime +import imghdr +import json +import ntpath +import os +import socket +import sys +import time +import xmlrpclib + +from bmconfigparser import BMConfigParser + + +api = '' +keysName = 'keys.dat' +keysPath = 'keys.dat' +usrPrompt = 0 # 0 = First Start, 1 = prompt, 2 = no prompt if the program is starting up +knownAddresses = dict() + + +def userInput(message): + """Checks input for exit or quit. 
Also formats for input, etc""" + + global usrPrompt + + print('\n' + message) + uInput = raw_input('> ') + + if uInput.lower() == 'exit': # Returns the user to the main menu + usrPrompt = 1 + main() + + elif uInput.lower() == 'quit': # Quits the program + print('\n Bye\n') + sys.exit(0) + + else: + return uInput + + +def restartBmNotify(): + """Prompt the user to restart Bitmessage""" + print('\n *******************************************************************') + print(' WARNING: If Bitmessage is running locally, you must restart it now.') + print(' *******************************************************************\n') + + +# Begin keys.dat interactions + + +def lookupAppdataFolder(): + """gets the appropriate folders for the .dat files depending on the OS. Taken from bitmessagemain.py""" + + APPNAME = "PyBitmessage" + if sys.platform == 'darwin': + if "HOME" in os.environ: + dataFolder = os.path.join(os.environ["HOME"], "Library/Application support/", APPNAME) + '/' + else: + print( + ' Could not find home folder, please report ' + 'this message and your OS X version to the Daemon Github.') + sys.exit(1) + + elif 'win32' in sys.platform or 'win64' in sys.platform: + dataFolder = os.path.join(os.environ['APPDATA'], APPNAME) + '\\' + else: + dataFolder = os.path.expanduser(os.path.join("~", ".config/" + APPNAME + "/")) + return dataFolder + + +def configInit(): + """Initialised the configuration""" + + BMConfigParser().add_section('bitmessagesettings') + # Sets the bitmessage port to stop the warning about the api not properly + # being setup. This is in the event that the keys.dat is in a different + # directory or is created locally to connect to a machine remotely. 
+ BMConfigParser().set('bitmessagesettings', 'port', '8444') + BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') # Sets apienabled to true in keys.dat + + with open(keysName, 'wb') as configfile: + BMConfigParser().write(configfile) + + print('\n ' + str(keysName) + ' Initalized in the same directory as daemon.py') + print(' You will now need to configure the ' + str(keysName) + ' file.\n') + + +def apiInit(apiEnabled): + """Initialise the API""" + + global usrPrompt + BMConfigParser().read(keysPath) + + if apiEnabled is False: # API information there but the api is disabled. + uInput = userInput("The API is not enabled. Would you like to do that now, (Y)es or (N)o?").lower() + + if uInput == "y": + BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') # Sets apienabled to true in keys.dat + with open(keysPath, 'wb') as configfile: + BMConfigParser().write(configfile) + + print('Done') + restartBmNotify() + return True + + elif uInput == "n": + print(' \n************************************************************') + print(' Daemon will not work when the API is disabled. ') + print(' Please refer to the Bitmessage Wiki on how to setup the API.') + print(' ************************************************************\n') + usrPrompt = 1 + main() + + else: + print('\n Invalid Entry\n') + usrPrompt = 1 + main() + + elif apiEnabled: # API correctly setup + # Everything is as it should be + return True + + else: # API information was not present. + print('\n ' + str(keysPath) + ' not properly configured!\n') + uInput = userInput("Would you like to do this now, (Y)es or (N)o?").lower() + + if uInput == "y": # User said yes, initalize the api by writing these values to the keys.dat file + print(' ') + + apiUsr = userInput("API Username") + apiPwd = userInput("API Password") + apiPort = userInput("API Port") + apiEnabled = userInput("API Enabled? (True) or (False)").lower() + daemon = userInput("Daemon mode Enabled? 
(True) or (False)").lower() + + if (daemon != 'true' and daemon != 'false'): + print('\n Invalid Entry for Daemon.\n') + uInput = 1 + main() + + print(' -----------------------------------\n') + + # sets the bitmessage port to stop the warning about the api not properly + # being setup. This is in the event that the keys.dat is in a different + # directory or is created locally to connect to a machine remotely. + BMConfigParser().set('bitmessagesettings', 'port', '8444') + BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') + BMConfigParser().set('bitmessagesettings', 'apiport', apiPort) + BMConfigParser().set('bitmessagesettings', 'apiinterface', '127.0.0.1') + BMConfigParser().set('bitmessagesettings', 'apiusername', apiUsr) + BMConfigParser().set('bitmessagesettings', 'apipassword', apiPwd) + BMConfigParser().set('bitmessagesettings', 'daemon', daemon) + with open(keysPath, 'wb') as configfile: + BMConfigParser().write(configfile) + + print('\n Finished configuring the keys.dat file with API information.\n') + restartBmNotify() + return True + + elif uInput == "n": + print('\n ***********************************************************') + print(' Please refer to the Bitmessage Wiki on how to setup the API.') + print(' ***********************************************************\n') + usrPrompt = 1 + main() + else: + print(' \nInvalid entry\n') + usrPrompt = 1 + main() + + +def apiData(): + """TBC""" + + global keysName + global keysPath + global usrPrompt + + BMConfigParser().read(keysPath) # First try to load the config file (the keys.dat file) from the program directory + + try: + BMConfigParser().get('bitmessagesettings', 'port') + appDataFolder = '' + except: # noqa:E722 + # Could not load the keys.dat file in the program directory. Perhaps it is in the appdata directory. 
+ appDataFolder = lookupAppdataFolder() + keysPath = appDataFolder + keysPath + BMConfigParser().read(keysPath) + + try: + BMConfigParser().get('bitmessagesettings', 'port') + except: # noqa:E722 + # keys.dat was not there either, something is wrong. + print('\n ******************************************************************') + print(' There was a problem trying to access the Bitmessage keys.dat file') + print(' or keys.dat is not set up correctly') + print(' Make sure that daemon is in the same directory as Bitmessage. ') + print(' ******************************************************************\n') + + uInput = userInput("Would you like to create a keys.dat in the local directory, (Y)es or (N)o?").lower() + + if uInput in ("y", "yes"): + configInit() + keysPath = keysName + usrPrompt = 0 + main() + elif uInput in ("n", "no"): + print('\n Trying Again.\n') + usrPrompt = 0 + main() + else: + print('\n Invalid Input.\n') + + usrPrompt = 1 + main() + + try: # checks to make sure that everyting is configured correctly. 
Excluding apiEnabled, it is checked after + BMConfigParser().get('bitmessagesettings', 'apiport') + BMConfigParser().get('bitmessagesettings', 'apiinterface') + BMConfigParser().get('bitmessagesettings', 'apiusername') + BMConfigParser().get('bitmessagesettings', 'apipassword') + + except: # noqa:E722 + apiInit("") # Initalize the keys.dat file with API information + + # keys.dat file was found or appropriately configured, allow information retrieval + # apiEnabled = + # apiInit(BMConfigParser().safeGetBoolean('bitmessagesettings','apienabled')) + # #if false it will prompt the user, if true it will return true + + BMConfigParser().read(keysPath) # read again since changes have been made + apiPort = int(BMConfigParser().get('bitmessagesettings', 'apiport')) + apiInterface = BMConfigParser().get('bitmessagesettings', 'apiinterface') + apiUsername = BMConfigParser().get('bitmessagesettings', 'apiusername') + apiPassword = BMConfigParser().get('bitmessagesettings', 'apipassword') + + print('\n API data successfully imported.\n') + + # Build the api credentials + return "http://" + apiUsername + ":" + apiPassword + "@" + apiInterface + ":" + str(apiPort) + "/" + + +# End keys.dat interactions + + +def apiTest(): + """Tests the API connection to bitmessage. 
Returns true if it is connected.""" + + try: + result = api.add(2, 3) + except: # noqa:E722 + return False + + return result == 5 + + +def bmSettings(): + """Allows the viewing and modification of keys.dat settings.""" + + global keysPath + global usrPrompt + + keysPath = 'keys.dat' + + BMConfigParser().read(keysPath) # Read the keys.dat + try: + port = BMConfigParser().get('bitmessagesettings', 'port') + except: # noqa:E722 + print('\n File not found.\n') + usrPrompt = 0 + main() + + startonlogon = BMConfigParser().safeGetBoolean('bitmessagesettings', 'startonlogon') + minimizetotray = BMConfigParser().safeGetBoolean('bitmessagesettings', 'minimizetotray') + showtraynotifications = BMConfigParser().safeGetBoolean('bitmessagesettings', 'showtraynotifications') + startintray = BMConfigParser().safeGetBoolean('bitmessagesettings', 'startintray') + defaultnoncetrialsperbyte = BMConfigParser().get('bitmessagesettings', 'defaultnoncetrialsperbyte') + defaultpayloadlengthextrabytes = BMConfigParser().get('bitmessagesettings', 'defaultpayloadlengthextrabytes') + daemon = BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon') + + socksproxytype = BMConfigParser().get('bitmessagesettings', 'socksproxytype') + sockshostname = BMConfigParser().get('bitmessagesettings', 'sockshostname') + socksport = BMConfigParser().get('bitmessagesettings', 'socksport') + socksauthentication = BMConfigParser().safeGetBoolean('bitmessagesettings', 'socksauthentication') + socksusername = BMConfigParser().get('bitmessagesettings', 'socksusername') + sockspassword = BMConfigParser().get('bitmessagesettings', 'sockspassword') + + print('\n -----------------------------------') + print(' | Current Bitmessage Settings |') + print(' -----------------------------------') + print(' port = ' + port) + print(' startonlogon = ' + str(startonlogon)) + print(' minimizetotray = ' + str(minimizetotray)) + print(' showtraynotifications = ' + str(showtraynotifications)) + print(' startintray = ' + 
str(startintray)) + print(' defaultnoncetrialsperbyte = ' + defaultnoncetrialsperbyte) + print(' defaultpayloadlengthextrabytes = ' + defaultpayloadlengthextrabytes) + print(' daemon = ' + str(daemon)) + print('\n ------------------------------------') + print(' | Current Connection Settings |') + print(' -----------------------------------') + print(' socksproxytype = ' + socksproxytype) + print(' sockshostname = ' + sockshostname) + print(' socksport = ' + socksport) + print(' socksauthentication = ' + str(socksauthentication)) + print(' socksusername = ' + socksusername) + print(' sockspassword = ' + sockspassword) + print(' ') + + uInput = userInput("Would you like to modify any of these settings, (Y)es or (N)o?").lower() + + if uInput == "y": + while True: # loops if they mistype the setting name, they can exit the loop with 'exit' + invalidInput = False + uInput = userInput("What setting would you like to modify?").lower() + print(' ') + + if uInput == "port": + print(' Current port number: ' + port) + uInput = userInput("Enter the new port number.") + BMConfigParser().set('bitmessagesettings', 'port', str(uInput)) + elif uInput == "startonlogon": + print(' Current status: ' + str(startonlogon)) + uInput = userInput("Enter the new status.") + BMConfigParser().set('bitmessagesettings', 'startonlogon', str(uInput)) + elif uInput == "minimizetotray": + print(' Current status: ' + str(minimizetotray)) + uInput = userInput("Enter the new status.") + BMConfigParser().set('bitmessagesettings', 'minimizetotray', str(uInput)) + elif uInput == "showtraynotifications": + print(' Current status: ' + str(showtraynotifications)) + uInput = userInput("Enter the new status.") + BMConfigParser().set('bitmessagesettings', 'showtraynotifications', str(uInput)) + elif uInput == "startintray": + print(' Current status: ' + str(startintray)) + uInput = userInput("Enter the new status.") + BMConfigParser().set('bitmessagesettings', 'startintray', str(uInput)) + elif uInput == 
"defaultnoncetrialsperbyte": + print(' Current default nonce trials per byte: ' + defaultnoncetrialsperbyte) + uInput = userInput("Enter the new defaultnoncetrialsperbyte.") + BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(uInput)) + elif uInput == "defaultpayloadlengthextrabytes": + print(' Current default payload length extra bytes: ' + defaultpayloadlengthextrabytes) + uInput = userInput("Enter the new defaultpayloadlengthextrabytes.") + BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(uInput)) + elif uInput == "daemon": + print(' Current status: ' + str(daemon)) + uInput = userInput("Enter the new status.").lower() + BMConfigParser().set('bitmessagesettings', 'daemon', str(uInput)) + elif uInput == "socksproxytype": + print(' Current socks proxy type: ' + socksproxytype) + print("Possibilities: 'none', 'SOCKS4a', 'SOCKS5'.") + uInput = userInput("Enter the new socksproxytype.") + BMConfigParser().set('bitmessagesettings', 'socksproxytype', str(uInput)) + elif uInput == "sockshostname": + print(' Current socks host name: ' + sockshostname) + uInput = userInput("Enter the new sockshostname.") + BMConfigParser().set('bitmessagesettings', 'sockshostname', str(uInput)) + elif uInput == "socksport": + print(' Current socks port number: ' + socksport) + uInput = userInput("Enter the new socksport.") + BMConfigParser().set('bitmessagesettings', 'socksport', str(uInput)) + elif uInput == "socksauthentication": + print(' Current status: ' + str(socksauthentication)) + uInput = userInput("Enter the new status.") + BMConfigParser().set('bitmessagesettings', 'socksauthentication', str(uInput)) + elif uInput == "socksusername": + print(' Current socks username: ' + socksusername) + uInput = userInput("Enter the new socksusername.") + BMConfigParser().set('bitmessagesettings', 'socksusername', str(uInput)) + elif uInput == "sockspassword": + print(' Current socks password: ' + sockspassword) + uInput = 
userInput("Enter the new password.") + BMConfigParser().set('bitmessagesettings', 'sockspassword', str(uInput)) + else: + print("\n Invalid input. Please try again.\n") + invalidInput = True + + if invalidInput is not True: # don't prompt if they made a mistake. + uInput = userInput("Would you like to change another setting, (Y)es or (N)o?").lower() + + if uInput != "y": + print('\n Changes Made.\n') + with open(keysPath, 'wb') as configfile: + BMConfigParser().write(configfile) + restartBmNotify() + break + + elif uInput == "n": + usrPrompt = 1 + main() + else: + print("Invalid input.") + usrPrompt = 1 + main() + + +def validAddress(address): + """Predicate to test address validity""" + address_information = json.loads(api.decodeAddress(address)) + + return 'success' in str(address_information['status']).lower() + + +def getAddress(passphrase, vNumber, sNumber): + """Get a deterministic address""" + passphrase = passphrase.encode('base64') # passphrase must be encoded + + return api.getDeterministicAddress(passphrase, vNumber, sNumber) + + +def subscribe(): + """Subscribe to an address""" + global usrPrompt + + while True: + address = userInput("What address would you like to subscribe to?") + + if address == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(address) is False: + print('\n Invalid. "c" to cancel. Please try again.\n') + else: + break + + label = userInput("Enter a label for this address.") + label = label.encode('base64') + + api.addSubscription(address, label) + print('\n You are now subscribed to: ' + address + '\n') + + +def unsubscribe(): + """Unsusbcribe from an address""" + global usrPrompt + + while True: + address = userInput("What address would you like to unsubscribe from?") + + if address == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(address) is False: + print('\n Invalid. "c" to cancel. 
Please try again.\n') + else: + break + + userInput("Are you sure, (Y)es or (N)o?").lower() # uInput = + + api.deleteSubscription(address) + print('\n You are now unsubscribed from: ' + address + '\n') + + +def listSubscriptions(): + """List subscriptions""" + + global usrPrompt + print('\nLabel, Address, Enabled\n') + try: + print(api.listSubscriptions()) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + print(' ') + + +def createChan(): + """Create a channel""" + + global usrPrompt + password = userInput("Enter channel name") + password = password.encode('base64') + try: + print(api.createChan(password)) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def joinChan(): + """Join a channel""" + + global usrPrompt + while True: + address = userInput("Enter channel address") + + if address == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(address) is False: + print('\n Invalid. "c" to cancel. Please try again.\n') + else: + break + + password = userInput("Enter channel name") + password = password.encode('base64') + try: + print(api.joinChan(password, address)) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def leaveChan(): + """Leave a channel""" + + global usrPrompt + while True: + address = userInput("Enter channel address") + + if address == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(address) is False: + print('\n Invalid. "c" to cancel. 
Please try again.\n') + else: + break + + try: + print(api.leaveChan(address)) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def listAdd(): + """List all of the addresses and their info""" + global usrPrompt + try: + jsonAddresses = json.loads(api.listAddresses()) + numAddresses = len(jsonAddresses['addresses']) # Number of addresses + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + # print('\nAddress Number,Label,Address,Stream,Enabled\n') + print('\n --------------------------------------------------------------------------') + print(' | # | Label | Address |S#|Enabled|') + print(' |---|-------------------|-------------------------------------|--|-------|') + for addNum in range(0, numAddresses): # processes all of the addresses and lists them out + label = (jsonAddresses['addresses'][addNum]['label']).encode( + 'utf') # may still misdiplay in some consoles + address = str(jsonAddresses['addresses'][addNum]['address']) + stream = str(jsonAddresses['addresses'][addNum]['stream']) + enabled = str(jsonAddresses['addresses'][addNum]['enabled']) + + if len(label) > 19: + label = label[:16] + '...' + + print(''.join([ + ' |', + str(addNum).ljust(3), + '|', + label.ljust(19), + '|', + address.ljust(37), + '|', + stream.ljust(1), + '|', + enabled.ljust(7), + '|', + ])) + + print(''.join([ + ' ', + 74 * '-', + '\n', + ])) + + +def genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe): + """Generate address""" + + global usrPrompt + + if deterministic is False: # Generates a new address with the user defined label. non-deterministic + addressLabel = lbl.encode('base64') + try: + generatedAddress = api.createRandomAddress(addressLabel) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + return generatedAddress + + elif deterministic: # Generates a new deterministic address with the user inputs. 
+ passphrase = passphrase.encode('base64') + try: + generatedAddress = api.createDeterministicAddresses(passphrase, numOfAdd, addVNum, streamNum, ripe) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + return generatedAddress + + return 'Entry Error' + + +def saveFile(fileName, fileData): + """Allows attachments and messages/broadcats to be saved""" + + # This section finds all invalid characters and replaces them with ~ + fileName = fileName.replace(" ", "") + fileName = fileName.replace("/", "~") + # fileName = fileName.replace("\\", "~") How do I get this to work...? + fileName = fileName.replace(":", "~") + fileName = fileName.replace("*", "~") + fileName = fileName.replace("?", "~") + fileName = fileName.replace('"', "~") + fileName = fileName.replace("<", "~") + fileName = fileName.replace(">", "~") + fileName = fileName.replace("|", "~") + + directory = os.path.abspath('attachments') + + if not os.path.exists(directory): + os.makedirs(directory) + + filePath = os.path.join(directory, fileName) + + with open(filePath, 'wb+') as path_to_file: + path_to_file.write(fileData.decode("base64")) + print('\n Successfully saved ' + filePath + '\n') + + +def attachment(): + """Allows users to attach a file to their message or broadcast""" + + theAttachmentS = '' + + while True: + + isImage = False + theAttachment = '' + + while True: # loops until valid path is entered + filePath = userInput( + '\nPlease enter the path to the attachment or just the attachment name if in this folder.') + + try: + with open(filePath): + break + except IOError: + print('\n %s was not found on your filesystem or can not be opened.\n' % filePath) + + # print(filesize, and encoding estimate with confirmation if file is over X size(1mb?)) + invSize = os.path.getsize(filePath) + invSize = (invSize / 1024) # Converts to kilobytes + round(invSize, 2) # Rounds to two decimal places + + if invSize > 500.0: # If over 500KB + print(''.join([ + '\n WARNING:The file 
that you are trying to attach is ', + invSize, + 'KB and will take considerable time to send.\n' + ])) + uInput = userInput('Are you sure you still want to attach it, (Y)es or (N)o?').lower() + + if uInput != "y": + print('\n Attachment discarded.\n') + return '' + elif invSize > 184320.0: # If larger than 180MB, discard. + print('\n Attachment too big, maximum allowed size:180MB\n') + main() + + pathLen = len(str(ntpath.basename(filePath))) # Gets the length of the filepath excluding the filename + fileName = filePath[(len(str(filePath)) - pathLen):] # reads the filename + + filetype = imghdr.what(filePath) # Tests if it is an image file + if filetype is not None: + print('\n ---------------------------------------------------') + print(' Attachment detected as an Image.') + print(' tags will automatically be included,') + print(' allowing the recipient to view the image') + print(' using the "View HTML code..." option in Bitmessage.') + print(' ---------------------------------------------------\n') + isImage = True + time.sleep(2) + + # Alert the user that the encoding process may take some time. + print('\n Encoding Attachment, Please Wait ...\n') + + with open(filePath, 'rb') as f: # Begin the actual encoding + data = f.read(188743680) # Reads files up to 180MB, the maximum size for Bitmessage. + data = data.encode("base64") + + if isImage: # If it is an image, include image tags in the message + theAttachment = """ + + + +Filename:%s +Filesize:%sKB +Encoding:base64 + +
+
+ %s +
+
""" % (fileName, invSize, fileName, filetype, data) + else: # Else it is not an image so do not include the embedded image code. + theAttachment = """ + + + +Filename:%s +Filesize:%sKB +Encoding:base64 + +""" % (fileName, invSize, fileName, fileName, data) + + uInput = userInput('Would you like to add another attachment, (Y)es or (N)o?').lower() + + if uInput in ('y', 'yes'): # Allows multiple attachments to be added to one message + theAttachmentS = str(theAttachmentS) + str(theAttachment) + '\n\n' + elif uInput in ('n', 'no'): + break + + theAttachmentS = theAttachmentS + theAttachment + return theAttachmentS + + +def sendMsg(toAddress, fromAddress, subject, message): + """ + With no arguments sent, sendMsg fills in the blanks. + subject and message must be encoded before they are passed. + """ + + global usrPrompt + if validAddress(toAddress) is False: + while True: + toAddress = userInput("What is the To Address?") + + if toAddress == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(toAddress) is False: + print('\n Invalid Address. "c" to cancel. 
Please try again.\n') + else: + break + + if validAddress(fromAddress) is False: + try: + jsonAddresses = json.loads(api.listAddresses()) + numAddresses = len(jsonAddresses['addresses']) # Number of addresses + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + if numAddresses > 1: # Ask what address to send from if multiple addresses + found = False + while True: + print(' ') + fromAddress = userInput("Enter an Address or Address Label to send from.") + + if fromAddress == "exit": + usrPrompt = 1 + main() + + for addNum in range(0, numAddresses): # processes all of the addresses + label = jsonAddresses['addresses'][addNum]['label'] + address = jsonAddresses['addresses'][addNum]['address'] + if fromAddress == label: # address entered was a label and is found + fromAddress = address + found = True + break + + if found is False: + if validAddress(fromAddress) is False: + print('\n Invalid Address. Please try again.\n') + + else: + for addNum in range(0, numAddresses): # processes all of the addresses + address = jsonAddresses['addresses'][addNum]['address'] + if fromAddress == address: # address entered was a found in our addressbook. + found = True + break + + if found is False: + print('\n The address entered is not one of yours. 
Please try again.\n') + + if found: + break # Address was found + + else: # Only one address in address book + print('\n Using the only address in the addressbook to send from.\n') + fromAddress = jsonAddresses['addresses'][0]['address'] + + if not subject: + subject = userInput("Enter your Subject.") + subject = subject.encode('base64') + if not message: + message = userInput("Enter your Message.") + + uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() + if uInput == "y": + message = message + '\n\n' + attachment() + + message = message.encode('base64') + + try: + ackData = api.sendMessage(toAddress, fromAddress, subject, message) + print('\n Message Status:', api.getStatus(ackData), '\n') + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def sendBrd(fromAddress, subject, message): + """Send a broadcast""" + + global usrPrompt + if not fromAddress: + + try: + jsonAddresses = json.loads(api.listAddresses()) + numAddresses = len(jsonAddresses['addresses']) # Number of addresses + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + if numAddresses > 1: # Ask what address to send from if multiple addresses + found = False + while True: + fromAddress = userInput("\nEnter an Address or Address Label to send from.") + + if fromAddress == "exit": + usrPrompt = 1 + main() + + for addNum in range(0, numAddresses): # processes all of the addresses + label = jsonAddresses['addresses'][addNum]['label'] + address = jsonAddresses['addresses'][addNum]['address'] + if fromAddress == label: # address entered was a label and is found + fromAddress = address + found = True + break + + if found is False: + if validAddress(fromAddress) is False: + print('\n Invalid Address. 
Please try again.\n') + + else: + for addNum in range(0, numAddresses): # processes all of the addresses + address = jsonAddresses['addresses'][addNum]['address'] + if fromAddress == address: # address entered was a found in our addressbook. + found = True + break + + if found is False: + print('\n The address entered is not one of yours. Please try again.\n') + + if found: + break # Address was found + + else: # Only one address in address book + print('\n Using the only address in the addressbook to send from.\n') + fromAddress = jsonAddresses['addresses'][0]['address'] + + if not subject: + subject = userInput("Enter your Subject.") + subject = subject.encode('base64') + if not message: + message = userInput("Enter your Message.") + + uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() + if uInput == "y": + message = message + '\n\n' + attachment() + + message = message.encode('base64') + + try: + ackData = api.sendBroadcast(fromAddress, subject, message) + print('\n Message Status:', api.getStatus(ackData), '\n') + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def inbox(unreadOnly=False): + """Lists the messages by: Message Number, To Address Label, From Address Label, Subject, Received Time)""" + + global usrPrompt + try: + inboxMessages = json.loads(api.getAllInboxMessages()) + numMessages = len(inboxMessages['inboxMessages']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + messagesPrinted = 0 + messagesUnread = 0 + for msgNum in range(0, numMessages): # processes all of the messages in the inbox + message = inboxMessages['inboxMessages'][msgNum] + # if we are displaying all messages or if this message is unread then display it + if not unreadOnly or not message['read']: + print(' -----------------------------------\n') + print(' Message Number:', msgNum) # Message Number) + print(' To:', getLabelForAddress(message['toAddress'])) # Get the to address) + 
print(' From:', getLabelForAddress(message['fromAddress'])) # Get the from address) + print(' Subject:', message['subject'].decode('base64')) # Get the subject) + print(''.join([ + ' Received:', + datetime.datetime.fromtimestamp( + float(message['receivedTime'])).strftime('%Y-%m-%d %H:%M:%S'), + ])) + messagesPrinted += 1 + if not message['read']: + messagesUnread += 1 + + if messagesPrinted % 20 == 0 and messagesPrinted != 0: + userInput('(Press Enter to continue or type (Exit) to return to the main menu.)').lower() # uInput = + + print('\n -----------------------------------') + print(' There are %d unread messages of %d messages in the inbox.' % (messagesUnread, numMessages)) + print(' -----------------------------------\n') + + +def outbox(): + """TBC""" + + global usrPrompt + try: + outboxMessages = json.loads(api.getAllSentMessages()) + numMessages = len(outboxMessages['sentMessages']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + for msgNum in range(0, numMessages): # processes all of the messages in the outbox + print('\n -----------------------------------\n') + print(' Message Number:', msgNum) # Message Number) + # print(' Message ID:', outboxMessages['sentMessages'][msgNum]['msgid']) + print(' To:', getLabelForAddress( + outboxMessages['sentMessages'][msgNum]['toAddress'] + )) # Get the to address) + # Get the from address + print(' From:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['fromAddress'])) + print(' Subject:', outboxMessages['sentMessages'][msgNum]['subject'].decode('base64')) # Get the subject) + print(' Status:', outboxMessages['sentMessages'][msgNum]['status']) # Get the subject) + + # print(''.join([ + # ' Last Action Time:', + # datetime.datetime.fromtimestamp( + # float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), + # ])) + print(''.join([ + ' Last Action Time:', + datetime.datetime.fromtimestamp( + 
float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), + ])) + + if msgNum % 20 == 0 and msgNum != 0: + userInput('(Press Enter to continue or type (Exit) to return to the main menu.)').lower() # uInput = + + print('\n -----------------------------------') + print(' There are ', numMessages, ' messages in the outbox.') + print(' -----------------------------------\n') + + +def readSentMsg(msgNum): + """Opens a sent message for reading""" + + global usrPrompt + try: + outboxMessages = json.loads(api.getAllSentMessages()) + numMessages = len(outboxMessages['sentMessages']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + print(' ') + + if msgNum >= numMessages: + print('\n Invalid Message Number.\n') + main() + + # Begin attachment detection + message = outboxMessages['sentMessages'][msgNum]['message'].decode('base64') + + while True: # Allows multiple messages to be downloaded/saved + if ';base64,' in message: # Found this text in the message, there is probably an attachment. + attPos = message.index(";base64,") # Finds the attachment position + attEndPos = message.index("' />") # Finds the end of the attachment + # attLen = attEndPos - attPos #Finds the length of the message + + if 'alt = "' in message: # We can get the filename too + fnPos = message.index('alt = "') # Finds position of the filename + fnEndPos = message.index('" src=') # Finds the end position + # fnLen = fnEndPos - fnPos #Finds the length of the filename + + fileName = message[fnPos + 7:fnEndPos] + else: + fnPos = attPos + fileName = 'Attachment' + + uInput = userInput( + '\n Attachment Detected. 
Would you like to save the attachment, (Y)es or (N)o?').lower() + if uInput in ("y", 'yes'): + + this_attachment = message[attPos + 9:attEndPos] + saveFile(fileName, this_attachment) + + message = message[:fnPos] + '~~' + message[(attEndPos + 4):] + + else: + break + + # End attachment Detection + + print('\n To:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['toAddress'])) # Get the to address) + # Get the from address + print(' From:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['fromAddress'])) + print(' Subject:', outboxMessages['sentMessages'][msgNum]['subject'].decode('base64')) # Get the subject) + print(' Status:', outboxMessages['sentMessages'][msgNum]['status']) # Get the subject) + print(''.join([ + ' Last Action Time:', + datetime.datetime.fromtimestamp( + float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), + ])) + print(' Message:\n') + print(message) # inboxMessages['inboxMessages'][msgNum]['message'].decode('base64')) + print(' ') + + +def readMsg(msgNum): + """Open a message for reading""" + global usrPrompt + try: + inboxMessages = json.loads(api.getAllInboxMessages()) + numMessages = len(inboxMessages['inboxMessages']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + if msgNum >= numMessages: + print('\n Invalid Message Number.\n') + main() + + # Begin attachment detection + message = inboxMessages['inboxMessages'][msgNum]['message'].decode('base64') + + while True: # Allows multiple messages to be downloaded/saved + if ';base64,' in message: # Found this text in the message, there is probably an attachment. 
+ attPos = message.index(";base64,") # Finds the attachment position + attEndPos = message.index("' />") # Finds the end of the attachment + # attLen = attEndPos - attPos #Finds the length of the message + + if 'alt = "' in message: # We can get the filename too + fnPos = message.index('alt = "') # Finds position of the filename + fnEndPos = message.index('" src=') # Finds the end position + # fnLen = fnEndPos - fnPos #Finds the length of the filename + + fileName = message[fnPos + 7:fnEndPos] + else: + fnPos = attPos + fileName = 'Attachment' + + uInput = userInput( + '\n Attachment Detected. Would you like to save the attachment, (Y)es or (N)o?').lower() + if uInput in ("y", 'yes'): + + this_attachment = message[attPos + 9:attEndPos] + saveFile(fileName, this_attachment) + + message = message[:fnPos] + '~~' + message[attEndPos + 4:] + + else: + break + + # End attachment Detection + print('\n To:', getLabelForAddress(inboxMessages['inboxMessages'][msgNum]['toAddress'])) # Get the to address) + # Get the from address + print(' From:', getLabelForAddress(inboxMessages['inboxMessages'][msgNum]['fromAddress'])) + print(' Subject:', inboxMessages['inboxMessages'][msgNum]['subject'].decode('base64')) # Get the subject) + print(''.join([ + ' Received:', datetime.datetime.fromtimestamp( + float(inboxMessages['inboxMessages'][msgNum]['receivedTime'])).strftime('%Y-%m-%d %H:%M:%S'), + ])) + print(' Message:\n') + print(message) # inboxMessages['inboxMessages'][msgNum]['message'].decode('base64')) + print(' ') + return inboxMessages['inboxMessages'][msgNum]['msgid'] + + +def replyMsg(msgNum, forwardORreply): + """Allows you to reply to the message you are currently on. 
Saves typing in the addresses and subject.""" + + global usrPrompt + forwardORreply = forwardORreply.lower() # makes it lowercase + try: + inboxMessages = json.loads(api.getAllInboxMessages()) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + fromAdd = inboxMessages['inboxMessages'][msgNum]['toAddress'] # Address it was sent To, now the From address + message = inboxMessages['inboxMessages'][msgNum]['message'].decode('base64') # Message that you are replying to. + + subject = inboxMessages['inboxMessages'][msgNum]['subject'] + subject = subject.decode('base64') + + if forwardORreply == 'reply': + toAdd = inboxMessages['inboxMessages'][msgNum]['fromAddress'] # Address it was From, now the To address + subject = "Re: " + subject + + elif forwardORreply == 'forward': + subject = "Fwd: " + subject + + while True: + toAdd = userInput("What is the To Address?") + + if toAdd == "c": + usrPrompt = 1 + print(' ') + main() + elif validAddress(toAdd) is False: + print('\n Invalid Address. "c" to cancel. Please try again.\n') + else: + break + else: + print('\n Invalid Selection. 
Reply or Forward only') + usrPrompt = 0 + main() + + subject = subject.encode('base64') + + newMessage = userInput("Enter your Message.") + + uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() + if uInput == "y": + newMessage = newMessage + '\n\n' + attachment() + + newMessage = newMessage + '\n\n------------------------------------------------------\n' + newMessage = newMessage + message + newMessage = newMessage.encode('base64') + + sendMsg(toAdd, fromAdd, subject, newMessage) + + main() + + +def delMsg(msgNum): + """Deletes a specified message from the inbox""" + + global usrPrompt + try: + inboxMessages = json.loads(api.getAllInboxMessages()) + # gets the message ID via the message index number + msgId = inboxMessages['inboxMessages'][int(msgNum)]['msgid'] + + msgAck = api.trashMessage(msgId) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + return msgAck + + +def delSentMsg(msgNum): + """Deletes a specified message from the outbox""" + + global usrPrompt + try: + outboxMessages = json.loads(api.getAllSentMessages()) + # gets the message ID via the message index number + msgId = outboxMessages['sentMessages'][int(msgNum)]['msgid'] + msgAck = api.trashSentMessage(msgId) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + return msgAck + + +def getLabelForAddress(address): + """Get label for an address""" + + if address in knownAddresses: + return knownAddresses[address] + else: + buildKnownAddresses() + if address in knownAddresses: + return knownAddresses[address] + + return address + + +def buildKnownAddresses(): + """Build known addresses""" + + global usrPrompt + + # add from address book + try: + response = api.listAddressBookEntries() + # if api is too old then fail + if "API Error 0020" in response: + return + addressBook = json.loads(response) + for entry in addressBook['addresses']: + if entry['address'] not in knownAddresses: + 
knownAddresses[entry['address']] = "%s (%s)" % (entry['label'].decode('base64'), entry['address']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + # add from my addresses + try: + response = api.listAddresses2() + # if api is too old just return then fail + if "API Error 0020" in response: + return + addresses = json.loads(response) + for entry in addresses['addresses']: + if entry['address'] not in knownAddresses: + knownAddresses[entry['address']] = "%s (%s)" % (entry['label'].decode('base64'), entry['address']) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def listAddressBookEntries(): + """List addressbook entries""" + + global usrPrompt + + try: + response = api.listAddressBookEntries() + if "API Error" in response: + return getAPIErrorCode(response) + addressBook = json.loads(response) + print(' --------------------------------------------------------------') + print(' | Label | Address |') + print(' |--------------------|---------------------------------------|') + for entry in addressBook['addresses']: + label = entry['label'].decode('base64') + address = entry['address'] + if len(label) > 19: + label = label[:16] + '...' 
+ print(' | ' + label.ljust(19) + '| ' + address.ljust(37) + ' |') + print(' --------------------------------------------------------------') + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def addAddressToAddressBook(address, label): + """Add an address to an addressbook""" + + global usrPrompt + + try: + response = api.addAddressBookEntry(address, label.encode('base64')) + if "API Error" in response: + return getAPIErrorCode(response) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def deleteAddressFromAddressBook(address): + """Delete an address from an addressbook""" + + global usrPrompt + + try: + response = api.deleteAddressBookEntry(address) + if "API Error" in response: + return getAPIErrorCode(response) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def getAPIErrorCode(response): + """Get API error code""" + + if "API Error" in response: + # if we got an API error return the number by getting the number + # after the second space and removing the trailing colon + return int(response.split()[2][:-1]) + + +def markMessageRead(messageID): + """Mark a message as read""" + + global usrPrompt + + try: + response = api.getInboxMessageByID(messageID, True) + if "API Error" in response: + return getAPIErrorCode(response) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def markMessageUnread(messageID): + """Mark a mesasge as unread""" + + global usrPrompt + + try: + response = api.getInboxMessageByID(messageID, False) + if "API Error" in response: + return getAPIErrorCode(response) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + +def markAllMessagesRead(): + """Mark all messages as read""" + + global usrPrompt + + try: + inboxMessages = json.loads(api.getAllInboxMessages())['inboxMessages'] + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + for message 
in inboxMessages: + if not message['read']: + markMessageRead(message['msgid']) + + +def markAllMessagesUnread(): + """Mark all messages as unread""" + + global usrPrompt + + try: + inboxMessages = json.loads(api.getAllInboxMessages())['inboxMessages'] + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + for message in inboxMessages: + if message['read']: + markMessageUnread(message['msgid']) + + +def clientStatus(): + """Print (the client status""" + + global usrPrompt + + try: + client_status = json.loads(api.clientStatus()) + except: # noqa:E722 + print('\n Connection Error\n') + usrPrompt = 0 + main() + + print("\nnetworkStatus: " + client_status['networkStatus'] + "\n") + print("\nnetworkConnections: " + str(client_status['networkConnections']) + "\n") + print("\nnumberOfPubkeysProcessed: " + str(client_status['numberOfPubkeysProcessed']) + "\n") + print("\nnumberOfMessagesProcessed: " + str(client_status['numberOfMessagesProcessed']) + "\n") + print("\nnumberOfBroadcastsProcessed: " + str(client_status['numberOfBroadcastsProcessed']) + "\n") + + +def shutdown(): + """Shutdown the API""" + + try: + api.shutdown() + except socket.error: + pass + print("\nShutdown command relayed\n") + + +def UI(usrInput): + """Main user menu""" + + global usrPrompt + + if usrInput in ("help", "h", "?"): + print(' ') + print(' -------------------------------------------------------------------------') + print(' | https://github.com/Dokument/PyBitmessage-Daemon |') + print(' |-----------------------------------------------------------------------|') + print(' | Command | Description |') + print(' |------------------------|----------------------------------------------|') + print(' | help | This help file. 
|') + print(' | apiTest | Tests the API |') + print(' | addInfo | Returns address information (If valid) |') + print(' | bmSettings | BitMessage settings |') + print(' | exit | Use anytime to return to main menu |') + print(' | quit | Quits the program |') + print(' |------------------------|----------------------------------------------|') + print(' | listAddresses | Lists all of the users addresses |') + print(' | generateAddress | Generates a new address |') + print(' | getAddress | Get determinist address from passphrase |') + print(' |------------------------|----------------------------------------------|') + print(' | listAddressBookEntries | Lists entries from the Address Book |') + print(' | addAddressBookEntry | Add address to the Address Book |') + print(' | deleteAddressBookEntry | Deletes address from the Address Book |') + print(' |------------------------|----------------------------------------------|') + print(' | subscribe | Subscribes to an address |') + print(' | unsubscribe | Unsubscribes from an address |') + print(' |------------------------|----------------------------------------------|') + print(' | create | Creates a channel |') + print(' | join | Joins a channel |') + print(' | leave | Leaves a channel |') + print(' |------------------------|----------------------------------------------|') + print(' | inbox | Lists the message information for the inbox |') + print(' | outbox | Lists the message information for the outbox |') + print(' | send | Send a new message or broadcast |') + print(' | unread | Lists all unread inbox messages |') + print(' | read | Reads a message from the inbox or outbox |') + print(' | save | Saves message to text file |') + print(' | delete | Deletes a message or all messages |') + print(' -------------------------------------------------------------------------') + print(' ') + main() + + elif usrInput == "apitest": # tests the API Connection. 
+ if apiTest(): + print('\n API connection test has: PASSED\n') + else: + print('\n API connection test has: FAILED\n') + main() + + elif usrInput == "addinfo": + tmp_address = userInput('\nEnter the Bitmessage Address.') + address_information = json.loads(api.decodeAddress(tmp_address)) + + print('\n------------------------------') + + if 'success' in str(address_information['status']).lower(): + print(' Valid Address') + print(' Address Version: %s' % str(address_information['addressVersion'])) + print(' Stream Number: %s' % str(address_information['streamNumber'])) + else: + print(' Invalid Address !') + + print('------------------------------\n') + main() + + elif usrInput == "bmsettings": # tests the API Connection. + bmSettings() + print(' ') + main() + + elif usrInput == "quit": # Quits the application + print('\n Bye\n') + sys.exit(0) + + elif usrInput == "listaddresses": # Lists all of the identities in the addressbook + listAdd() + main() + + elif usrInput == "generateaddress": # Generates a new address + uInput = userInput('\nWould you like to create a (D)eterministic or (R)andom address?').lower() + + if uInput in ("d", "deterministic"): # Creates a deterministic address + deterministic = True + + lbl = '' + passphrase = userInput('Enter the Passphrase.') # .encode('base64') + numOfAdd = int(userInput('How many addresses would you like to generate?')) + addVNum = 3 + streamNum = 1 + isRipe = userInput('Shorten the address, (Y)es or (N)o?').lower() + + if isRipe == "y": + ripe = True + print(genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe)) + main() + elif isRipe == "n": + ripe = False + print(genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe)) + main() + elif isRipe == "exit": + usrPrompt = 1 + main() + else: + print('\n Invalid input\n') + main() + + elif uInput == "r" or uInput == "random": # Creates a random address with user-defined label + deterministic = False + null = '' + lbl = userInput('Enter 
the label for the new address.')

+            print(genAdd(lbl, deterministic, null, null, null, null, null))
+            main()
+
+        else:
+            print('\n Invalid input\n')
+            main()
+
+    elif usrInput == "getaddress":  # Gets the address for/from a passphrase
+        phrase = userInput("Enter the address passphrase.")
+        print('\n Working...\n')
+        address = getAddress(phrase, 4, 1)  # ,vNumber,sNumber)
+        print('\n Address: ' + address + '\n')
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "subscribe":  # Subscribe to an address
+        subscribe()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "unsubscribe":  # Unsubscribe from an address
+        unsubscribe()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "listsubscriptions":  # Lists subscriptions
+        listSubscriptions()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "create":
+        createChan()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "join":
+        joinChan()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "leave":
+        leaveChan()
+        usrPrompt = 1
+        main()
+
+    elif usrInput == "inbox":
+        print('\n Loading...\n')
+        inbox()
+        main()
+
+    elif usrInput == "unread":
+        print('\n Loading...\n')
+        inbox(True)
+        main()
+
+    elif usrInput == "outbox":
+        print('\n Loading...\n')
+        outbox()
+        main()
+
+    elif usrInput == 'send':  # Sends a message or broadcast
+        uInput = userInput('Would you like to send a (M)essage or (B)roadcast?').lower()
+
+        if uInput in ('m', 'message'):
+            null = ''
+            sendMsg(null, null, null, null)
+            main()
+        elif uInput in ('b', 'broadcast'):
+            null = ''
+            sendBrd(null, null, null)
+            main()
+
+    elif usrInput == "read":  # Opens a message from the inbox for viewing.
+ + uInput = userInput("Would you like to read a message from the (I)nbox or (O)utbox?").lower() + + if uInput not in ('i', 'inbox', 'o', 'outbox'): + print('\n Invalid Input.\n') + usrPrompt = 1 + main() + + msgNum = int(userInput("What is the number of the message you wish to open?")) + + if uInput in ('i', 'inbox'): + print('\n Loading...\n') + messageID = readMsg(msgNum) + + uInput = userInput("\nWould you like to keep this message unread, (Y)es or (N)o?").lower() + + if uInput not in ('y', 'yes'): + markMessageRead(messageID) + usrPrompt = 1 + + uInput = userInput("\nWould you like to (D)elete, (F)orward, (R)eply to, or (Exit) this message?").lower() + + if uInput in ('r', 'reply'): + print('\n Loading...\n') + print(' ') + replyMsg(msgNum, 'reply') + usrPrompt = 1 + + elif uInput in ('f', 'forward'): + print('\n Loading...\n') + print(' ') + replyMsg(msgNum, 'forward') + usrPrompt = 1 + + elif uInput in ("d", 'delete'): + uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion + + if uInput == "y": + delMsg(msgNum) + print('\n Message Deleted.\n') + usrPrompt = 1 + else: + usrPrompt = 1 + else: + print('\n Invalid entry\n') + usrPrompt = 1 + + elif uInput in ('o', 'outbox'): + readSentMsg(msgNum) + + # Gives the user the option to delete the message + uInput = userInput("Would you like to (D)elete, or (Exit) this message?").lower() + + if uInput in ("d", 'delete'): + uInput = userInput('Are you sure, (Y)es or (N)o?').lower() # Prevent accidental deletion + + if uInput == "y": + delSentMsg(msgNum) + print('\n Message Deleted.\n') + usrPrompt = 1 + else: + usrPrompt = 1 + else: + print('\n Invalid Entry\n') + usrPrompt = 1 + + main() + + elif usrInput == "save": + + uInput = userInput("Would you like to save a message from the (I)nbox or (O)utbox?").lower() + + if uInput not in ('i', 'inbox', 'o', 'outbox'): + print('\n Invalid Input.\n') + usrPrompt = 1 + main() + + if uInput in ('i', 'inbox'): + inboxMessages = 
json.loads(api.getAllInboxMessages()) + numMessages = len(inboxMessages['inboxMessages']) + + while True: + msgNum = int(userInput("What is the number of the message you wish to save?")) + + if msgNum >= numMessages: + print('\n Invalid Message Number.\n') + else: + break + + subject = inboxMessages['inboxMessages'][msgNum]['subject'].decode('base64') + # Don't decode since it is done in the saveFile function + message = inboxMessages['inboxMessages'][msgNum]['message'] + + elif uInput == 'o' or uInput == 'outbox': + outboxMessages = json.loads(api.getAllSentMessages()) + numMessages = len(outboxMessages['sentMessages']) + + while True: + msgNum = int(userInput("What is the number of the message you wish to save?")) + + if msgNum >= numMessages: + print('\n Invalid Message Number.\n') + else: + break + + subject = outboxMessages['sentMessages'][msgNum]['subject'].decode('base64') + # Don't decode since it is done in the saveFile function + message = outboxMessages['sentMessages'][msgNum]['message'] + + subject = subject + '.txt' + saveFile(subject, message) + + usrPrompt = 1 + main() + + elif usrInput == "delete": # will delete a message from the system, not reflected on the UI. 
+ + uInput = userInput("Would you like to delete a message from the (I)nbox or (O)utbox?").lower() + + if uInput in ('i', 'inbox'): + inboxMessages = json.loads(api.getAllInboxMessages()) + numMessages = len(inboxMessages['inboxMessages']) + + while True: + msgNum = userInput( + 'Enter the number of the message you wish to delete or (A)ll to empty the inbox.').lower() + + if msgNum == 'a' or msgNum == 'all': + break + elif int(msgNum) >= numMessages: + print('\n Invalid Message Number.\n') + else: + break + + uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion + + if uInput == "y": + if msgNum in ('a', 'all'): + print(' ') + for msgNum in range(0, numMessages): # processes all of the messages in the inbox + print(' Deleting message ', msgNum + 1, ' of ', numMessages) + delMsg(0) + + print('\n Inbox is empty.') + usrPrompt = 1 + else: + delMsg(int(msgNum)) + + print('\n Notice: Message numbers may have changed.\n') + main() + else: + usrPrompt = 1 + + elif uInput in ('o', 'outbox'): + outboxMessages = json.loads(api.getAllSentMessages()) + numMessages = len(outboxMessages['sentMessages']) + + while True: + msgNum = userInput( + 'Enter the number of the message you wish to delete or (A)ll to empty the inbox.').lower() + + if msgNum in ('a', 'all'): + break + elif int(msgNum) >= numMessages: + print('\n Invalid Message Number.\n') + else: + break + + uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion + + if uInput == "y": + if msgNum in ('a', 'all'): + print(' ') + for msgNum in range(0, numMessages): # processes all of the messages in the outbox + print(' Deleting message ', msgNum + 1, ' of ', numMessages) + delSentMsg(0) + + print('\n Outbox is empty.') + usrPrompt = 1 + else: + delSentMsg(int(msgNum)) + print('\n Notice: Message numbers may have changed.\n') + main() + else: + usrPrompt = 1 + else: + print('\n Invalid Entry.\n') + usrPrompt = 1 + main() + + elif usrInput == "exit": + 
print('\n You are already at the main menu. Use "quit" to quit.\n') + usrPrompt = 1 + main() + + elif usrInput == "listaddressbookentries": + res = listAddressBookEntries() + if res == 20: + print('\n Error: API function not supported.\n') + usrPrompt = 1 + main() + + elif usrInput == "addaddressbookentry": + address = userInput('Enter address') + label = userInput('Enter label') + res = addAddressToAddressBook(address, label) + if res == 16: + print('\n Error: Address already exists in Address Book.\n') + if res == 20: + print('\n Error: API function not supported.\n') + usrPrompt = 1 + main() + + elif usrInput == "deleteaddressbookentry": + address = userInput('Enter address') + res = deleteAddressFromAddressBook(address) + if res == 20: + print('\n Error: API function not supported.\n') + usrPrompt = 1 + main() + + elif usrInput == "markallmessagesread": + markAllMessagesRead() + usrPrompt = 1 + main() + + elif usrInput == "markallmessagesunread": + markAllMessagesUnread() + usrPrompt = 1 + main() + + elif usrInput == "status": + clientStatus() + usrPrompt = 1 + main() + + elif usrInput == "shutdown": + shutdown() + usrPrompt = 1 + main() + + else: + print('\n "', usrInput, '" is not a command.\n') + usrPrompt = 1 + main() + + +def main(): + """Entrypoint for the CLI app""" + + global api + global usrPrompt + + if usrPrompt == 0: + print('\n ------------------------------') + print(' | Bitmessage Daemon by .dok |') + print(' | Version 0.3.1 for BM 0.6.2 |') + print(' ------------------------------') + api = xmlrpclib.ServerProxy(apiData()) # Connect to BitMessage using these api credentials + + if apiTest() is False: + print('\n ****************************************************************') + print(' WARNING: You are not connected to the Bitmessage client.') + print(' Either Bitmessage is not running or your settings are incorrect.') + print(' Use the command "apiTest" or "bmSettings" to resolve this issue.') + print(' 
****************************************************************\n') + + print('Type (H)elp for a list of commands.') # Startup message) + usrPrompt = 2 + + elif usrPrompt == 1: + print('\nType (H)elp for a list of commands.') # Startup message) + usrPrompt = 2 + + try: + UI((raw_input('>').lower()).replace(" ", "")) + except EOFError: + UI("quit") + + +if __name__ == "__main__": + main() diff --git a/src/tests/mock/pybitmessage/bitmessagemain.py b/src/tests/mock/pybitmessage/bitmessagemain.py new file mode 100755 index 00000000..84313ab9 --- /dev/null +++ b/src/tests/mock/pybitmessage/bitmessagemain.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python +""" +The PyBitmessage startup script +""" +# Copyright (c) 2012-2016 Jonathan Warren +# Copyright (c) 2012-2020 The Bitmessage developers +# Distributed under the MIT/X11 software license. See the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +# Right now, PyBitmessage only support connecting to stream 1. It doesn't +# yet contain logic to expand into further streams. +import os +import sys + +try: + import pathmagic +except ImportError: + from pybitmessage import pathmagic +app_dir = pathmagic.setup() + +import depends +depends.check_dependencies() + +import getopt +import multiprocessing +# Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully. 
+import signal +import threading +import time +import traceback + +import defaults +import shared +import shutdown +import state + +from testmode_init import populate_api_test_data +from bmconfigparser import BMConfigParser +from debug import logger # this should go before any threads +from helper_startup import ( + adjustHalfOpenConnectionsLimit, fixSocket, start_proxyconfig) +from inventory import Inventory +# Network objects and threads +from network import ( + BMConnectionPool, Dandelion, AddrThread, AnnounceThread, BMNetworkThread, + InvThread, ReceiveQueueThread, DownloadThread, UploadThread +) +from network.knownnodes import readKnownNodes +from singleinstance import singleinstance +# Synchronous threads +from threads import ( + set_thread_name, printLock, + addressGenerator, objectProcessor, singleCleaner, singleWorker, sqlThread) + + +def signal_handler(signum, frame): + """Single handler for any signal sent to pybitmessage""" + process = multiprocessing.current_process() + thread = threading.current_thread() + logger.error( + 'Got signal %i in %s/%s', + signum, process.name, thread.name + ) + if process.name == "RegExParser": + # on Windows this isn't triggered, but it's fine, + # it has its own process termination thing + raise SystemExit + if "PoolWorker" in process.name: + raise SystemExit + if thread.name not in ("PyBitmessage", "MainThread"): + return + logger.error("Got signal %i", signum) + # there are possible non-UI variants to run bitmessage + # which should shutdown especially test-mode + if state.thisapp.daemon or not state.enableGUI: + shutdown.doCleanShutdown() + else: + print('# Thread: %s(%d)' % (thread.name, thread.ident)) + for filename, lineno, name, line in traceback.extract_stack(frame): + print('File: "%s", line %d, in %s' % (filename, lineno, name)) + if line: + print(' %s' % line.strip()) + print('Unfortunately you cannot use Ctrl+C when running the UI' + ' because the UI captures the signal.') + + +class Main(object): + """Main 
PyBitmessage class""" + def start(self): + """Start main application""" + # pylint: disable=too-many-statements,too-many-branches,too-many-locals + fixSocket() + adjustHalfOpenConnectionsLimit() + + config = BMConfigParser() + daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') + + try: + opts, _ = getopt.getopt( + sys.argv[1:], "hcdt", + ["help", "curses", "daemon", "test"]) + + except getopt.GetoptError: + self.usage() + sys.exit(2) + + for opt, _ in opts: + if opt in ("-h", "--help"): + self.usage() + sys.exit() + elif opt in ("-d", "--daemon"): + daemon = True + elif opt in ("-c", "--curses"): + state.curses = True + elif opt in ("-t", "--test"): + state.testmode = True + if os.path.isfile(os.path.join( + state.appdata, 'unittest.lock')): + daemon = True + state.enableGUI = False # run without a UI + # Fallback: in case when no api command was issued + state.last_api_response = time.time() + # Apply special settings + config.set( + 'bitmessagesettings', 'apienabled', 'true') + config.set( + 'bitmessagesettings', 'apiusername', 'username') + config.set( + 'bitmessagesettings', 'apipassword', 'password') + config.set( + 'bitmessagesettings', 'apivariant', 'legacy') + config.set( + 'bitmessagesettings', 'apinotifypath', + os.path.join(app_dir, 'tests', 'apinotify_handler.py') + ) + + if daemon: + state.enableGUI = False # run without a UI + + if state.enableGUI and not state.curses and not depends.check_pyqt(): + sys.exit( + 'PyBitmessage requires PyQt unless you want' + ' to run it as a daemon and interact with it' + ' using the API. You can download PyQt from ' + 'http://www.riverbankcomputing.com/software/pyqt/download' + ' or by searching Google for \'PyQt Download\'.' + ' If you want to run in daemon mode, see ' + 'https://bitmessage.org/wiki/Daemon\n' + 'You can also run PyBitmessage with' + ' the new curses interface by providing' + ' \'-c\' as a commandline argument.' + ) + # is the application already running? If yes then exit. 
+ state.thisapp = singleinstance("", daemon) + + if daemon: + with printLock: + print('Running as a daemon. Send TERM signal to end.') + self.daemonize() + + self.setSignalHandler() + + set_thread_name("PyBitmessage") + + state.dandelion = config.safeGetInt('network', 'dandelion') + # dandelion requires outbound connections, without them, + # stem objects will get stuck forever + if state.dandelion and not config.safeGetBoolean( + 'bitmessagesettings', 'sendoutgoingconnections'): + state.dandelion = 0 + + if state.testmode or config.safeGetBoolean( + 'bitmessagesettings', 'extralowdifficulty'): + defaults.networkDefaultProofOfWorkNonceTrialsPerByte = int( + defaults.networkDefaultProofOfWorkNonceTrialsPerByte / 100) + defaults.networkDefaultPayloadLengthExtraBytes = int( + defaults.networkDefaultPayloadLengthExtraBytes / 100) + + readKnownNodes() + + # Not needed if objproc is disabled + if state.enableObjProc: + + # Start the address generation thread + addressGeneratorThread = addressGenerator() + # close the main program even if there are threads left + addressGeneratorThread.daemon = True + addressGeneratorThread.start() + + # Start the thread that calculates POWs + singleWorkerThread = singleWorker() + # close the main program even if there are threads left + singleWorkerThread.daemon = True + singleWorkerThread.start() + + # Start the SQL thread + sqlLookup = sqlThread() + # DON'T close the main program even if there are threads left. + # The closeEvent should command this thread to exit gracefully. 
+            sqlLookup.daemon = False
+            sqlLookup.start()
+
+        Inventory()  # init
+        # init, needs to be early because other thread may access it early
+        Dandelion()
+
+        # Enable object processor and SMTP only if objproc enabled
+        if state.enableObjProc:
+
+            # SMTP delivery thread
+            if daemon and config.safeGet(
+                    'bitmessagesettings', 'smtpdeliver', '') != '':
+                from class_smtpDeliver import smtpDeliver
+                smtpDeliveryThread = smtpDeliver()
+                smtpDeliveryThread.start()
+
+            # SMTP daemon thread
+            if daemon and config.safeGetBoolean(
+                    'bitmessagesettings', 'smtpd'):
+                from class_smtpServer import smtpServer
+                smtpServerThread = smtpServer()
+                smtpServerThread.start()
+
+            # Start the object processor thread
+            objectProcessorThread = objectProcessor()
+            # DON'T close the main program even if the thread remains.
+            # This thread checks the shutdown variable after processing
+            # each object.
+            objectProcessorThread.daemon = False
+            objectProcessorThread.start()
+
+            # Start the cleanerThread
+            singleCleanerThread = singleCleaner()
+            # close the main program even if there are threads left
+            singleCleanerThread.daemon = True
+            singleCleanerThread.start()
+
+        # Not needed if objproc disabled
+        if state.enableObjProc:
+            shared.reloadMyAddressHashes()
+            shared.reloadBroadcastSendersForWhichImWatching()
+
+            # API is also objproc dependent
+            if config.safeGetBoolean('bitmessagesettings', 'apienabled'):
+                import api  # pylint: disable=relative-import
+                singleAPIThread = api.singleAPI()
+                # close the main program even if there are threads left
+                singleAPIThread.daemon = True
+                singleAPIThread.start()
+
+        # start network components if networking is enabled
+        if state.enableNetwork:
+            start_proxyconfig()
+            BMConnectionPool().connectToStream(1)
+            asyncoreThread = BMNetworkThread()
+            asyncoreThread.daemon = True
+            asyncoreThread.start()
+            for i in range(config.getint('threads', 'receive')):
+                receiveQueueThread = ReceiveQueueThread(i)
+                receiveQueueThread.daemon = True
+                receiveQueueThread.start()
+ if config.safeGetBoolean('bitmessagesettings', 'udp'): + state.announceThread = AnnounceThread() + state.announceThread.daemon = True + state.announceThread.start() + state.invThread = InvThread() + state.invThread.daemon = True + state.invThread.start() + state.addrThread = AddrThread() + state.addrThread.daemon = True + state.addrThread.start() + state.downloadThread = DownloadThread() + state.downloadThread.daemon = True + state.downloadThread.start() + state.uploadThread = UploadThread() + state.uploadThread.daemon = True + state.uploadThread.start() + + if config.safeGetBoolean('bitmessagesettings', 'upnp'): + import upnp + upnpThread = upnp.uPnPThread() + upnpThread.start() + else: + # Populate with hardcoded value (same as connectToStream above) + state.streamsInWhichIAmParticipating.append(1) + + if not daemon and state.enableGUI: + if state.curses: + if not depends.check_curses(): + sys.exit() + print('Running with curses') + import bitmessagecurses + bitmessagecurses.runwrapper() + else: + import bitmessageqt + bitmessageqt.run() + else: + config.remove_option('bitmessagesettings', 'dontconnect') + + if state.testmode: + populate_api_test_data() + + if daemon: + while state.shutdown == 0: + time.sleep(1) + if ( + state.testmode + and time.time() - state.last_api_response >= 30 + ): + self.stop() + elif not state.enableGUI: + state.enableGUI = True + try: + # pylint: disable=relative-import + from tests import core as test_core + except ImportError: + self.stop() + return + + test_core_result = test_core.run() + self.stop() + test_core.cleanup() + sys.exit(not test_core_result.wasSuccessful()) + + @staticmethod + def daemonize(): + """Running as a daemon. 
Send signal in end.""" + grandfatherPid = os.getpid() + parentPid = None + try: + if os.fork(): + # unlock + state.thisapp.cleanup() + # wait until grandchild ready + while True: + time.sleep(1) + os._exit(0) # pylint: disable=protected-access + except AttributeError: + # fork not implemented + pass + else: + parentPid = os.getpid() + state.thisapp.lock() # relock + + os.umask(0) + try: + os.setsid() + except AttributeError: + # setsid not implemented + pass + try: + if os.fork(): + # unlock + state.thisapp.cleanup() + # wait until child ready + while True: + time.sleep(1) + os._exit(0) # pylint: disable=protected-access + except AttributeError: + # fork not implemented + pass + else: + state.thisapp.lock() # relock + state.thisapp.lockPid = None # indicate we're the final child + sys.stdout.flush() + sys.stderr.flush() + if not sys.platform.startswith('win'): + si = open(os.devnull, 'r') + so = open(os.devnull, 'a+') + se = open(os.devnull, 'a+', 0) + os.dup2(si.fileno(), sys.stdin.fileno()) + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + if parentPid: + # signal ready + os.kill(parentPid, signal.SIGTERM) + os.kill(grandfatherPid, signal.SIGTERM) + + @staticmethod + def setSignalHandler(): + """Setting the Signal Handler""" + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + # signal.signal(signal.SIGINT, signal.SIG_DFL) + + @staticmethod + def usage(): + """Displaying the usages""" + print('Usage: ' + sys.argv[0] + ' [OPTIONS]') + print(''' +Options: + -h, --help show this help message and exit + -c, --curses use curses (text mode) interface + -d, --daemon run in daemon (background) mode + -t, --test dryrun, make testing + +All parameters are optional. +''') + + @staticmethod + def stop(): + """Stop main application""" + with printLock: + print('Stopping Bitmessage Deamon.') + shutdown.doCleanShutdown() + + # .. 
todo:: nice function but no one is using this + @staticmethod + def getApiAddress(): + """This function returns API address and port""" + if not BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'apienabled'): + return None + address = BMConfigParser().get('bitmessagesettings', 'apiinterface') + port = BMConfigParser().getint('bitmessagesettings', 'apiport') + return {'address': address, 'port': port} + + +def main(): + """Triggers main module""" + mainprogram = Main() + mainprogram.start() + + +if __name__ == "__main__": + main() + + +# So far, the creation of and management of the Bitmessage protocol and this +# client is a one-man operation. Bitcoin tips are quite appreciated. +# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u diff --git a/src/tests/mock/pybitmessage/bmconfigparser.py b/src/tests/mock/pybitmessage/bmconfigparser.py new file mode 100644 index 00000000..4798dda4 --- /dev/null +++ b/src/tests/mock/pybitmessage/bmconfigparser.py @@ -0,0 +1,271 @@ +""" +BMConfigParser class definition and default configuration settings +""" + +import os +import shutil +import sys # FIXME: bad style! 
write more generally +from datetime import datetime + +from six import string_types +from six.moves import configparser + +try: + import state + from singleton import Singleton +except ImportError: + from pybitmessage import state + from pybitmessage.singleton import Singleton + +SafeConfigParser = configparser.SafeConfigParser + + +BMConfigDefaults = { + "bitmessagesettings": { + "maxaddrperstreamsend": 500, + "maxbootstrapconnections": 20, + "maxdownloadrate": 0, + "maxoutboundconnections": 8, + "maxtotalconnections": 200, + "maxuploadrate": 0, + "apiinterface": "127.0.0.1", + "apiport": 8442, + "udp": "True" + }, + "threads": { + "receive": 3, + }, + "network": { + "bind": "", + "dandelion": 90, + }, + "inventory": { + "storage": "sqlite", + "acceptmismatch": "False", + }, + "knownnodes": { + "maxnodes": 20000, + }, + "zlib": { + "maxsize": 1048576 + } +} + + +@Singleton +class BMConfigParser(SafeConfigParser): + """ + Singleton class inherited from :class:`ConfigParser.SafeConfigParser` + with additional methods specific to bitmessage config. 
+ """ + # pylint: disable=too-many-ancestors + _temp = {} + + def set(self, section, option, value=None): + if self._optcre is self.OPTCRE or value: + if not isinstance(value, string_types): + raise TypeError("option values must be strings") + if not self.validate(section, option, value): + raise ValueError("Invalid value %s" % value) + return SafeConfigParser.set(self, section, option, value) + + def get(self, section, option, raw=False, vars=None): + if sys.version_info[0] == 3: + # pylint: disable=arguments-differ + try: + if section == "bitmessagesettings" and option == "timeformat": + return SafeConfigParser.get( + self, section, option, raw=True, vars=vars) + try: + return self._temp[section][option] + except KeyError: + pass + return SafeConfigParser.get( + self, section, option, raw=True, vars=vars) + except configparser.InterpolationError: + return SafeConfigParser.get( + self, section, option, raw=True, vars=vars) + except (configparser.NoSectionError, configparser.NoOptionError) as e: + try: + return BMConfigDefaults[section][option] + except (KeyError, ValueError, AttributeError): + raise e + else: + # pylint: disable=arguments-differ + try: + if section == "bitmessagesettings" and option == "timeformat": + return SafeConfigParser.get( + self, section, option, raw, vars) + try: + return self._temp[section][option] + except KeyError: + pass + return SafeConfigParser.get( + self, section, option, True, vars) + except configparser.InterpolationError: + return SafeConfigParser.get( + self, section, option, True, vars) + except (configparser.NoSectionError, configparser.NoOptionError) as e: + try: + return BMConfigDefaults[section][option] + except (KeyError, ValueError, AttributeError): + raise e + + def setTemp(self, section, option, value=None): + """Temporary set option to value, not saving.""" + try: + self._temp[section][option] = value + except KeyError: + self._temp[section] = {option: value} + + def safeGetBoolean(self, section, field): + """Return 
value as boolean, False on exceptions""" + try: + # Used in the python2.7 + # return self.getboolean(section, field) + # Used in the python3.5.2 + # print(config, section, field) + return self.getboolean(section, field) + except (configparser.NoSectionError, configparser.NoOptionError, + ValueError, AttributeError): + return False + + def safeGetInt(self, section, field, default=0): + """Return value as integer, default on exceptions, + 0 if default missing""" + try: + # Used in the python2.7 + # return self.getint(section, field) + # Used in the python3.7.0 + return int(self.get(section, field)) + except (configparser.NoSectionError, configparser.NoOptionError, + ValueError, AttributeError): + return default + + def safeGetFloat(self, section, field, default=0.0): + """Return value as float, default on exceptions, + 0.0 if default missing""" + try: + return self.getfloat(section, field) + except (configparser.NoSectionError, configparser.NoOptionError, + ValueError, AttributeError): + return default + + def safeGet(self, section, option, default=None): + """Return value as is, default on exceptions, None if default missing""" + try: + return self.get(section, option) + except (configparser.NoSectionError, configparser.NoOptionError, + ValueError, AttributeError): + return default + + def items(self, section, raw=False, variables=None): + # pylint: disable=signature-differs + """Return section variables as parent, + but override the "raw" argument to always True""" + return SafeConfigParser.items(self, section, True, variables) + + def _reset(self): + """Reset current config. 
There doesn't appear to be a built in + method for this""" + sections = self.sections() + for x in sections: + self.remove_section(x) + + if sys.version_info[0] == 3: + @staticmethod + def addresses(hidden=False): + """Return a list of local bitmessage addresses (from section labels)""" + return [x for x in BMConfigParser().sections() if x.startswith('BM-') and ( + hidden or not BMConfigParser().safeGetBoolean(x, 'hidden'))] + + def read(self, filenames): + self._reset() + SafeConfigParser.read(self, filenames) + for section in self.sections(): + for option in self.options(section): + try: + if not self.validate( + section, option, + self[section][option] + ): + try: + newVal = BMConfigDefaults[section][option] + except configparser.NoSectionError: + continue + except KeyError: + continue + SafeConfigParser.set( + self, section, option, newVal) + except configparser.InterpolationError: + continue + + def readfp(self, fp, filename=None): + # pylint: disable=no-member + SafeConfigParser.read_file(self, fp) + else: + @staticmethod + def addresses(): + """Return a list of local bitmessage addresses (from section labels)""" + return [ + x for x in BMConfigParser().sections() if x.startswith('BM-')] + + def read(self, filenames): + """Read config and populate defaults""" + self._reset() + SafeConfigParser.read(self, filenames) + for section in self.sections(): + for option in self.options(section): + try: + if not self.validate( + section, option, + SafeConfigParser.get(self, section, option) + ): + try: + newVal = BMConfigDefaults[section][option] + except KeyError: + continue + SafeConfigParser.set( + self, section, option, newVal) + except configparser.InterpolationError: + continue + + def save(self): + """Save the runtime config onto the filesystem""" + fileName = os.path.join(state.appdata, 'keys.dat') + fileNameBak = '.'.join([ + fileName, datetime.now().strftime("%Y%j%H%M%S%f"), 'bak']) + # create a backup copy to prevent the accidental loss due to + # the disk 
write failure + try: + shutil.copyfile(fileName, fileNameBak) + # The backup succeeded. + fileNameExisted = True + except (IOError, Exception): + # The backup failed. This can happen if the file + # didn't exist before. + fileNameExisted = False + + with open(fileName, 'w') as configfile: + self.write(configfile) + # delete the backup + if fileNameExisted: + os.remove(fileNameBak) + + def validate(self, section, option, value): + """Input validator interface (using factory pattern)""" + try: + return getattr(self, 'validate_%s_%s' % (section, option))(value) + except AttributeError: + return True + + @staticmethod + def validate_bitmessagesettings_maxoutboundconnections(value): + """Reject maxoutboundconnections that are too high or too low""" + try: + value = int(value) + except ValueError: + return False + if value < 0 or value > 8: + return False + return True diff --git a/src/tests/mock/pybitmessage/build_osx.py b/src/tests/mock/pybitmessage/build_osx.py new file mode 100644 index 00000000..83d2f280 --- /dev/null +++ b/src/tests/mock/pybitmessage/build_osx.py @@ -0,0 +1,38 @@ +"""Building osx.""" +import os +from glob import glob +from PyQt4 import QtCore +from setuptools import setup + +name = "Bitmessage" +version = os.getenv("PYBITMESSAGEVERSION", "custom") +mainscript = ["bitmessagemain.py"] + +DATA_FILES = [ + ('', ['sslkeys', 'images']), + ('bitmsghash', ['bitmsghash/bitmsghash.cl', 'bitmsghash/bitmsghash.so']), + ('translations', glob('translations/*.qm')), + ('ui', glob('bitmessageqt/*.ui')), + ( + 'translations', + glob(os.path.join(str(QtCore.QLibraryInfo.location( + QtCore.QLibraryInfo.TranslationsPath)), 'qt_??.qm'))), + ( + 'translations', + glob(os.path.join(str(QtCore.QLibraryInfo.location( + QtCore.QLibraryInfo.TranslationsPath)), 'qt_??_??.qm'))), +] + +setup( + name=name, + version=version, + app=mainscript, + data_files=DATA_FILES, + setup_requires=["py2app"], + options=dict( + py2app=dict( + includes=['sip', 'PyQt4._qt'], + 
iconfile="images/bitmessage.icns" + ) + ) +) diff --git a/src/tests/mock/class_addressGenerator.py b/src/tests/mock/pybitmessage/class_addressGenerator.py similarity index 98% rename from src/tests/mock/class_addressGenerator.py rename to src/tests/mock/pybitmessage/class_addressGenerator.py index fbb34710..0aaed140 100644 --- a/src/tests/mock/class_addressGenerator.py +++ b/src/tests/mock/pybitmessage/class_addressGenerator.py @@ -60,7 +60,7 @@ class StoppableThread(threading.Thread): self.stop.set() -class FakeAddressGenerator(StoppableThread): +class addressGenerator(StoppableThread): """A thread for creating fake addresses""" name = "addressGenerator" address_list = list(fake_addresses.keys()) diff --git a/src/tests/mock/class_objectProcessor.py b/src/tests/mock/pybitmessage/class_objectProcessor.py similarity index 97% rename from src/tests/mock/class_objectProcessor.py rename to src/tests/mock/pybitmessage/class_objectProcessor.py index c418abc6..09120fc6 100644 --- a/src/tests/mock/class_objectProcessor.py +++ b/src/tests/mock/pybitmessage/class_objectProcessor.py @@ -15,7 +15,7 @@ from network import bmproto logger = logging.getLogger('default') -class MockObjectProcessor(threading.Thread): +class objectProcessor(threading.Thread): """ The objectProcessor thread, of which there is only one, receives network objects (msg, broadcast, pubkey, getpubkey) from the receiveDataThreads. diff --git a/src/tests/mock/pybitmessage/class_singleCleaner.py b/src/tests/mock/pybitmessage/class_singleCleaner.py new file mode 100644 index 00000000..3f3f8ec0 --- /dev/null +++ b/src/tests/mock/pybitmessage/class_singleCleaner.py @@ -0,0 +1,187 @@ +""" +The `singleCleaner` class is a timer-driven thread that cleans data structures +to free memory, resends messages when a remote node doesn't respond, and +sends pong messages to keep connections alive if the network isn't busy. 
+ +It cleans these data structures in memory: + - inventory (moves data to the on-disk sql database) + - inventorySets (clears then reloads data out of sql database) + +It cleans these tables on the disk: + - inventory (clears expired objects) + - pubkeys (clears pubkeys older than 4 weeks old which we have not used + personally) + - knownNodes (clears addresses which have not been online for over 3 days) + +It resends messages when there has been no response: + - resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...) + - resends msg messages in 5 days (then 10 days, then 20 days, etc...) + +""" + +import gc +import os +import time + +import queues +import state +from bmconfigparser import BMConfigParser +from helper_sql import sqlExecute, sqlQuery +from inventory import Inventory +from network import BMConnectionPool, knownnodes, StoppableThread +from tr import _translate + + +#: Equals 4 weeks. You could make this longer if you want +#: but making it shorter would not be advisable because +#: there is a very small possibility that it could keep you +#: from obtaining a needed pubkey for a period of time. +lengthOfTimeToHoldOnToAllPubkeys = 2419200 + + +class singleCleaner(StoppableThread): + """The singleCleaner thread class""" + name = "singleCleaner" + cycleLength = 300 + expireDiscoveredPeers = 300 + + def run(self): # pylint: disable=too-many-branches + gc.disable() + timeWeLastClearedInventoryAndPubkeysTables = 0 + try: + state.maximumLengthOfTimeToBotherResendingMessages = ( + BMConfigParser().getfloat( + 'bitmessagesettings', 'stopresendingafterxdays') + * 24 * 60 * 60 + ) + ( + BMConfigParser().getfloat( + 'bitmessagesettings', 'stopresendingafterxmonths') + * (60 * 60 * 24 * 365) / 12) + except: # noqa:E722 + # Either the user hasn't set stopresendingafterxdays and + # stopresendingafterxmonths yet or the options are missing + # from the config file. 
+            state.maximumLengthOfTimeToBotherResendingMessages = float('inf')
+
+        while state.shutdown == 0:
+            self.stop.wait(self.cycleLength)
+            queues.UISignalQueue.put((
+                'updateStatusBar',
+                'Doing housekeeping (Flushing inventory in memory to disk...)'
+            ))
+            Inventory().flush()
+            queues.UISignalQueue.put(('updateStatusBar', ''))
+
+            # If we are running as a daemon then we are going to fill up the UI
+            # queue which will never be handled by a UI. We should clear it to
+            # save memory.
+            # FIXME redundant?
+            if state.thisapp.daemon or not state.enableGUI:
+                queues.UISignalQueue.queue.clear()
+
+            tick = int(time.time())
+            if timeWeLastClearedInventoryAndPubkeysTables < tick - 7380:
+                timeWeLastClearedInventoryAndPubkeysTables = tick
+                Inventory().clean()
+                queues.workerQueue.put(('sendOnionPeerObj', ''))
+                # pubkeys
+                sqlExecute(
+                    "DELETE FROM pubkeys WHERE time<? AND usedpersonally='no'",
+                    tick - lengthOfTimeToHoldOnToAllPubkeys)
+
+                # Let us resend getpubkey objects if we have not yet heard
+                # a pubkey, and also msg objects if we have not yet heard
+                # an acknowledgement
+                queryreturn = sqlQuery(
+                    "SELECT toaddress, ackdata, status FROM sent"
+                    " WHERE ((status='awaitingpubkey' OR status='msgsent')"
+                    " AND folder='sent' AND sleeptill<? AND senttime>?)",
+                    tick,
+                    tick - state.maximumLengthOfTimeToBotherResendingMessages
+                )
+                for toAddress, ackData, status in queryreturn:
+                    if status == 'awaitingpubkey':
+                        self.resendPubkeyRequest(toAddress)
+                    elif status == 'msgsent':
+                        self.resendMsg(ackData)
+
+            try:
+                # Cleanup knownnodes and handle possible severe exception
+                # while writing it to disk
+                knownnodes.cleanupKnownNodes()
+            except Exception as err:
+                if "Errno 28" in str(err):
+                    self.logger.fatal(
+                        '(while writing knownnodes to disk)'
+                        ' Alert: Your disk or data storage volume is full.'
+                    )
+                    queues.UISignalQueue.put((
+                        'alert',
+                        (_translate("MainWindow", "Disk full"),
+                         _translate(
+                            "MainWindow",
+                            'Alert: Your disk or data storage volume'
+                            ' is full. Bitmessage will now exit.'),
+                         True)
+                    ))
+                    # FIXME redundant?
+ if state.thisapp.daemon or not state.enableGUI: + os._exit(1) # pylint: disable=protected-access + + # inv/object tracking + for connection in BMConnectionPool().connections(): + connection.clean() + + # discovery tracking + exp = time.time() - singleCleaner.expireDiscoveredPeers + reaper = (k for k, v in state.discoveredPeers.items() if v < exp) + for k in reaper: + try: + del state.discoveredPeers[k] + except KeyError: + pass + # ..todo:: cleanup pending upload / download + + gc.collect() + + def resendPubkeyRequest(self, address): + """Resend pubkey request for address""" + self.logger.debug( + 'It has been a long time and we haven\'t heard a response to our' + ' getpubkey request. Sending again.' + ) + try: + # We need to take this entry out of the neededPubkeys structure + # because the queues.workerQueue checks to see whether the entry + # is already present and will not do the POW and send the message + # because it assumes that it has already done it recently. + del state.neededPubkeys[address] + except KeyError: + pass + except RuntimeError: + self.logger.warning( + "Can't remove %s from neededPubkeys, requesting pubkey will be delayed", address, exc_info=True) + + queues.UISignalQueue.put(( + 'updateStatusBar', + 'Doing work necessary to again attempt to request a public key...' + )) + sqlExecute( + "UPDATE sent SET status = 'msgqueued'" + " WHERE toaddress = ? AND folder = 'sent'", address) + queues.workerQueue.put(('sendmessage', '')) + + def resendMsg(self, ackdata): + """Resend message by ackdata""" + self.logger.debug( + 'It has been a long time and we haven\'t heard an acknowledgement' + ' to our msg. Sending again.' + ) + sqlExecute( + "UPDATE sent SET status = 'msgqueued'" + " WHERE ackdata = ? AND folder = 'sent'", ackdata) + queues.workerQueue.put(('sendmessage', '')) + queues.UISignalQueue.put(( + 'updateStatusBar', + 'Doing work necessary to again attempt to deliver a message...' 
+ )) diff --git a/src/tests/mock/class_singleWorker.py b/src/tests/mock/pybitmessage/class_singleWorker.py similarity index 96% rename from src/tests/mock/class_singleWorker.py rename to src/tests/mock/pybitmessage/class_singleWorker.py index 92ffffbd..f60b1f05 100644 --- a/src/tests/mock/class_singleWorker.py +++ b/src/tests/mock/pybitmessage/class_singleWorker.py @@ -12,7 +12,7 @@ from network import StoppableThread from six.moves import queue -class MockSingleWorker(StoppableThread): +class singleWorker(StoppableThread): """Thread for performing PoW""" def __init__(self): diff --git a/src/tests/mock/pybitmessage/class_smtpDeliver.py b/src/tests/mock/pybitmessage/class_smtpDeliver.py new file mode 100644 index 00000000..08cb35ab --- /dev/null +++ b/src/tests/mock/pybitmessage/class_smtpDeliver.py @@ -0,0 +1,117 @@ +""" +SMTP client thread for delivering emails +""" +# pylint: disable=unused-variable + +import smtplib +import urlparse +from email.header import Header +from email.mime.text import MIMEText + +import queues +import state +from bmconfigparser import BMConfigParser +from network.threads import StoppableThread + +SMTPDOMAIN = "bmaddr.lan" + + +class smtpDeliver(StoppableThread): + """SMTP client thread for delivery""" + name = "smtpDeliver" + _instance = None + + def stopThread(self): + # pylint: disable=no-member + try: + queues.UISignallerQueue.put(("stopThread", "data")) + except: # noqa:E722 + pass + super(smtpDeliver, self).stopThread() + + @classmethod + def get(cls): + """(probably) Singleton functionality""" + if not cls._instance: + cls._instance = smtpDeliver() + return cls._instance + + def run(self): + # pylint: disable=too-many-branches,too-many-statements,too-many-locals + # pylint: disable=deprecated-lambda + while state.shutdown == 0: + command, data = queues.UISignalQueue.get() + if command == 'writeNewAddressToTable': + label, address, streamNumber = data + elif command == 'updateStatusBar': + pass + elif command == 
'updateSentItemStatusByToAddress': + toAddress, message = data + elif command == 'updateSentItemStatusByAckdata': + ackData, message = data + elif command == 'displayNewInboxMessage': + inventoryHash, toAddress, fromAddress, subject, body = data + dest = BMConfigParser().safeGet("bitmessagesettings", "smtpdeliver", '') + if dest == '': + continue + try: + u = urlparse.urlparse(dest) + to = urlparse.parse_qs(u.query)['to'] + client = smtplib.SMTP(u.hostname, u.port) + msg = MIMEText(body, 'plain', 'utf-8') + msg['Subject'] = Header(subject, 'utf-8') + msg['From'] = fromAddress + '@' + SMTPDOMAIN + toLabel = map( + lambda y: BMConfigParser().safeGet(y, "label"), + filter( + lambda x: x == toAddress, BMConfigParser().addresses()) + ) + if toLabel: + msg['To'] = "\"%s\" <%s>" % (Header(toLabel[0], 'utf-8'), toAddress + '@' + SMTPDOMAIN) + else: + msg['To'] = toAddress + '@' + SMTPDOMAIN + client.ehlo() + client.starttls() + client.ehlo() + client.sendmail(msg['From'], [to], msg.as_string()) + self.logger.info( + 'Delivered via SMTP to %s through %s:%i ...', + to, u.hostname, u.port) + client.quit() + except: # noqa:E722 + self.logger.error('smtp delivery error', exc_info=True) + elif command == 'displayNewSentMessage': + toAddress, fromLabel, fromAddress, subject, message, ackdata = data + elif command == 'updateNetworkStatusTab': + pass + elif command == 'updateNumberOfMessagesProcessed': + pass + elif command == 'updateNumberOfPubkeysProcessed': + pass + elif command == 'updateNumberOfBroadcastsProcessed': + pass + elif command == 'setStatusIcon': + pass + elif command == 'changedInboxUnread': + pass + elif command == 'rerenderMessagelistFromLabels': + pass + elif command == 'rerenderMessagelistToLabels': + pass + elif command == 'rerenderAddressBook': + pass + elif command == 'rerenderSubscriptions': + pass + elif command == 'rerenderBlackWhiteList': + pass + elif command == 'removeInboxRowByMsgid': + pass + elif command == 'newVersionAvailable': + pass + elif 
command == 'alert': + title, text, exitAfterUserClicksOk = data + elif command == 'stopThread': + break + else: + self.logger.warning( + 'Command sent to smtpDeliver not recognized: %s', command) diff --git a/src/tests/mock/pybitmessage/class_smtpServer.py b/src/tests/mock/pybitmessage/class_smtpServer.py new file mode 100644 index 00000000..f5b63c2e --- /dev/null +++ b/src/tests/mock/pybitmessage/class_smtpServer.py @@ -0,0 +1,217 @@ +""" +SMTP server thread +""" +import asyncore +import base64 +import email +import logging +import re +import signal +import smtpd +import threading +import time +from email.header import decode_header +from email.parser import Parser + +import queues +from addresses import decodeAddress +from bmconfigparser import BMConfigParser +from helper_ackPayload import genAckPayload +from helper_sql import sqlExecute +from network.threads import StoppableThread +from version import softwareVersion + +SMTPDOMAIN = "bmaddr.lan" +LISTENPORT = 8425 + +logger = logging.getLogger('default') +# pylint: disable=attribute-defined-outside-init + + +class SmtpServerChannelException(Exception): + """Generic smtp server channel exception.""" + pass + + +class smtpServerChannel(smtpd.SMTPChannel): + """Asyncore channel for SMTP protocol (server)""" + def smtp_EHLO(self, arg): + """Process an EHLO""" + if not arg: + self.push('501 Syntax: HELO hostname') + return + self.push('250-PyBitmessage %s' % softwareVersion) + self.push('250 AUTH PLAIN') + + def smtp_AUTH(self, arg): + """Process AUTH""" + if not arg or arg[0:5] not in ["PLAIN"]: + self.push('501 Syntax: AUTH PLAIN') + return + authstring = arg[6:] + try: + decoded = base64.b64decode(authstring) + correctauth = "\x00" + BMConfigParser().safeGet( + "bitmessagesettings", "smtpdusername", "") + "\x00" + BMConfigParser().safeGet( + "bitmessagesettings", "smtpdpassword", "") + logger.debug('authstring: %s / %s', correctauth, decoded) + if correctauth == decoded: + self.auth = True + self.push('235 2.7.0 
Authentication successful') + else: + raise SmtpServerChannelException("Auth fail") + except: # noqa:E722 + self.push('501 Authentication fail') + + def smtp_DATA(self, arg): + """Process DATA""" + if not hasattr(self, "auth") or not self.auth: + self.push('530 Authentication required') + return + smtpd.SMTPChannel.smtp_DATA(self, arg) + + +class smtpServerPyBitmessage(smtpd.SMTPServer): + """Asyncore SMTP server class""" + def handle_accept(self): + """Accept a connection""" + pair = self.accept() + if pair is not None: + conn, addr = pair + self.channel = smtpServerChannel(self, conn, addr) + + def send(self, fromAddress, toAddress, subject, message): + """Send a bitmessage""" + # pylint: disable=arguments-differ + streamNumber, ripe = decodeAddress(toAddress)[2:] + stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel') + ackdata = genAckPayload(streamNumber, stealthLevel) + sqlExecute( + '''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', + '', + toAddress, + ripe, + fromAddress, + subject, + message, + ackdata, + int(time.time()), # sentTime (this will never change) + int(time.time()), # lastActionTime + 0, # sleepTill time. This will get set when the POW gets done. 
+ 'msgqueued', + 0, # retryNumber + 'sent', # folder + 2, # encodingtype + # not necessary to have a TTL higher than 2 days + min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2) + ) + + queues.workerQueue.put(('sendmessage', toAddress)) + + def decode_header(self, hdr): + """Email header decoding""" + ret = [] + for h in decode_header(self.msg_headers[hdr]): + if h[1]: + ret.append(h[0].decode(h[1])) + else: + ret.append(h[0].decode("utf-8", errors='replace')) + + return ret + + def process_message(self, peer, mailfrom, rcpttos, data): + """Process an email""" + # pylint: disable=too-many-locals, too-many-branches + p = re.compile(".*<([^>]+)>") + if not hasattr(self.channel, "auth") or not self.channel.auth: + logger.error('Missing or invalid auth') + return + try: + self.msg_headers = Parser().parsestr(data) + except: # noqa:E722 + logger.error('Invalid headers') + return + + try: + sender, domain = p.sub(r'\1', mailfrom).split("@") + if domain != SMTPDOMAIN: + raise Exception("Bad domain %s" % domain) + if sender not in BMConfigParser().addresses(): + raise Exception("Nonexisting user %s" % sender) + except Exception as err: + logger.debug('Bad envelope from %s: %r', mailfrom, err) + msg_from = self.decode_header("from") + try: + msg_from = p.sub(r'\1', self.decode_header("from")[0]) + sender, domain = msg_from.split("@") + if domain != SMTPDOMAIN: + raise Exception("Bad domain %s" % domain) + if sender not in BMConfigParser().addresses(): + raise Exception("Nonexisting user %s" % sender) + except Exception as err: + logger.error('Bad headers from %s: %r', msg_from, err) + return + + try: + msg_subject = self.decode_header('subject')[0] + except: # noqa:E722 + msg_subject = "Subject missing..." 
+ + msg_tmp = email.message_from_string(data) + body = u'' + for part in msg_tmp.walk(): + if part and part.get_content_type() == "text/plain": + body += part.get_payload(decode=1).decode(part.get_content_charset('utf-8'), errors='replace') + + for to in rcpttos: + try: + rcpt, domain = p.sub(r'\1', to).split("@") + if domain != SMTPDOMAIN: + raise Exception("Bad domain %s" % domain) + logger.debug( + 'Sending %s to %s about %s', sender, rcpt, msg_subject) + self.send(sender, rcpt, msg_subject, body) + logger.info('Relayed %s to %s', sender, rcpt) + except Exception as err: + logger.error('Bad to %s: %r', to, err) + continue + return + + +class smtpServer(StoppableThread): + """SMTP server thread""" + def __init__(self, _=None): + super(smtpServer, self).__init__(name="smtpServerThread") + self.server = smtpServerPyBitmessage(('127.0.0.1', LISTENPORT), None) + + def stopThread(self): + super(smtpServer, self).stopThread() + self.server.close() + return + + def run(self): + asyncore.loop(1) + + +def signals(_, __): + """Signal handler""" + logger.warning('Got signal, terminating') + for thread in threading.enumerate(): + if thread.isAlive() and isinstance(thread, StoppableThread): + thread.stopThread() + + +def runServer(): + """Run SMTP server as a standalone python process""" + logger.warning('Running SMTPd thread') + smtpThread = smtpServer() + smtpThread.start() + signal.signal(signal.SIGINT, signals) + signal.signal(signal.SIGTERM, signals) + logger.warning('Processing') + smtpThread.join() + logger.warning('The end') + + +if __name__ == "__main__": + runServer() diff --git a/src/tests/mock/pybitmessage/class_sqlThread.py b/src/tests/mock/pybitmessage/class_sqlThread.py new file mode 100644 index 00000000..d22ffadb --- /dev/null +++ b/src/tests/mock/pybitmessage/class_sqlThread.py @@ -0,0 +1,639 @@ +""" +sqlThread is defined here +""" + +import os +import shutil # used for moving the messages.dat file +import sqlite3 +import sys +import threading +import time + 
+try: + import helper_sql + import helper_startup + import paths + import queues + import state + from addresses import encodeAddress + from bmconfigparser import BMConfigParser + from debug import logger + from tr import _translate +except ImportError: + from . import helper_sql, helper_startup, paths, queues, state + from .addresses import encodeAddress + from .bmconfigparser import BMConfigParser + from .debug import logger + from .tr import _translate + + +class sqlThread(threading.Thread): + """A thread for all SQL operations""" + + def __init__(self): + threading.Thread.__init__(self, name="SQL") + + def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements + """Process SQL queries from `.helper_sql.sqlSubmitQueue`""" + helper_sql.sql_available = True + self.conn = sqlite3.connect(state.appdata + 'messages.dat') + self.conn.text_factory = str + self.cur = self.conn.cursor() + + self.cur.execute('PRAGMA secure_delete = true') + + # call create_function for encode address + self.create_function() + + try: + self.cur.execute( + '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,''' + ''' received text, message text, folder text, encodingtype int, read bool, sighash blob,''' + ''' UNIQUE(msgid) ON CONFLICT REPLACE)''') + self.cur.execute( + '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,''' + ''' message text, ackdata blob, senttime integer, lastactiontime integer,''' + ''' sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''') + self.cur.execute( + '''CREATE TABLE subscriptions (label text, address text, enabled bool)''') + self.cur.execute( + '''CREATE TABLE addressbook (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''') + self.cur.execute( + '''CREATE TABLE blacklist (label text, address text, enabled bool)''') + self.cur.execute( + '''CREATE TABLE whitelist (label text, address text, enabled 
bool)''') + self.cur.execute( + '''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') + self.cur.execute( + '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,''' + ''' expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') + self.cur.execute( + '''INSERT INTO subscriptions VALUES''' + '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') + self.cur.execute( + '''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') + self.cur.execute('''INSERT INTO settings VALUES('version','11')''') + self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( + int(time.time()),)) + self.cur.execute( + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') + self.conn.commit() + logger.info('Created messages database file') + except Exception as err: + if str(err) == 'table inbox already exists': + logger.debug('Database file already exists.') + + else: + sys.stderr.write( + 'ERROR trying to create database file (message.dat). Error message: %s\n' % str(err)) + os._exit(0) + + # If the settings version is equal to 2 or 3 then the + # sqlThread will modify the pubkeys table and change + # the settings version to 4. + settingsversion = BMConfigParser().getint( + 'bitmessagesettings', 'settingsversion') + + # People running earlier versions of PyBitmessage do not have the + # usedpersonally field in their pubkeys table. Let's add it. 
+ if settingsversion == 2: + item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' ''' + parameters = '' + self.cur.execute(item, parameters) + self.conn.commit() + + settingsversion = 3 + + # People running earlier versions of PyBitmessage do not have the + # encodingtype field in their inbox and sent tables or the read field + # in the inbox table. Let's add them. + if settingsversion == 3: + item = '''ALTER TABLE inbox ADD encodingtype int DEFAULT '2' ''' + parameters = '' + self.cur.execute(item, parameters) + + item = '''ALTER TABLE inbox ADD read bool DEFAULT '1' ''' + parameters = '' + self.cur.execute(item, parameters) + + item = '''ALTER TABLE sent ADD encodingtype int DEFAULT '2' ''' + parameters = '' + self.cur.execute(item, parameters) + self.conn.commit() + + settingsversion = 4 + + BMConfigParser().set( + 'bitmessagesettings', 'settingsversion', str(settingsversion)) + BMConfigParser().save() + + helper_startup.updateConfig() + + # From now on, let us keep a 'version' embedded in the messages.dat + # file so that when we make changes to the database, the database + # version we are on can stay embedded in the messages.dat file. Let us + # check to see if the settings table exists yet. + item = '''SELECT name FROM sqlite_master WHERE type='table' AND name='settings';''' + parameters = '' + self.cur.execute(item, parameters) + if self.cur.fetchall() == []: + # The settings table doesn't exist. We need to make it. 
+ logger.debug( + "In messages.dat database, creating new 'settings' table.") + self.cur.execute( + '''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') + self.cur.execute('''INSERT INTO settings VALUES('version','1')''') + self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( + int(time.time()),)) + logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.') + self.cur.execute( + '''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''') + self.cur.execute( + '''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''') + self.cur.execute('''DROP TABLE pubkeys''') + self.cur.execute( + '''CREATE TABLE pubkeys''' + ''' (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''') + self.cur.execute( + '''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''') + self.cur.execute('''DROP TABLE pubkeys_backup;''') + logger.debug( + 'Deleting all pubkeys from inventory.' + ' They will be redownloaded and then saved with the correct times.') + self.cur.execute( + '''delete from inventory where objecttype = 'pubkey';''') + logger.debug('replacing Bitmessage announcements mailing list with a new one.') + self.cur.execute( + '''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''') + self.cur.execute( + '''INSERT INTO subscriptions VALUES''' + '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') + logger.debug('Commiting.') + self.conn.commit() + logger.debug('Vacuuming message.dat. You might notice that the file size gets much smaller.') + self.cur.execute(''' VACUUM ''') + + # After code refactoring, the possible status values for sent messages + # have changed. 
+ self.cur.execute( + '''update sent set status='doingmsgpow' where status='doingpow' ''') + self.cur.execute( + '''update sent set status='msgsent' where status='sentmessage' ''') + self.cur.execute( + '''update sent set status='doingpubkeypow' where status='findingpubkey' ''') + self.cur.execute( + '''update sent set status='broadcastqueued' where status='broadcastpending' ''') + self.conn.commit() + + # Let's get rid of the first20bytesofencryptedmessage field in + # the inventory table. + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + if int(self.cur.fetchall()[0][0]) == 2: + logger.debug( + 'In messages.dat database, removing an obsolete field from' + ' the inventory table.') + self.cur.execute( + '''CREATE TEMPORARY TABLE inventory_backup''' + '''(hash blob, objecttype text, streamnumber int, payload blob,''' + ''' receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''') + self.cur.execute( + '''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime''' + ''' FROM inventory;''') + self.cur.execute('''DROP TABLE inventory''') + self.cur.execute( + '''CREATE TABLE inventory''' + ''' (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,''' + ''' UNIQUE(hash) ON CONFLICT REPLACE)''') + self.cur.execute( + '''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime''' + ''' FROM inventory_backup;''') + self.cur.execute('''DROP TABLE inventory_backup;''') + item = '''update settings set value=? WHERE key='version';''' + parameters = (3,) + self.cur.execute(item, parameters) + + # Add a new column to the inventory table to store tags. 
+ item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 1 or currentVersion == 3: + logger.debug( + 'In messages.dat database, adding tag field to' + ' the inventory table.') + item = '''ALTER TABLE inventory ADD tag blob DEFAULT '' ''' + parameters = '' + self.cur.execute(item, parameters) + item = '''update settings set value=? WHERE key='version';''' + parameters = (4,) + self.cur.execute(item, parameters) + + # Add a new column to the pubkeys table to store the address version. + # We're going to trash all of our pubkeys and let them be redownloaded. + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 4: + self.cur.execute('''DROP TABLE pubkeys''') + self.cur.execute( + '''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,''' + '''usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''') + self.cur.execute( + '''delete from inventory where objecttype = 'pubkey';''') + item = '''update settings set value=? WHERE key='version';''' + parameters = (5,) + self.cur.execute(item, parameters) + + # Add a new table: objectprocessorqueue with which to hold objects + # that have yet to be processed if the user shuts down Bitmessage. + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 5: + self.cur.execute('''DROP TABLE knownnodes''') + self.cur.execute( + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') + item = '''update settings set value=? 
WHERE key='version';''' + parameters = (6,) + self.cur.execute(item, parameters) + + # changes related to protocol v3 + # In table inventory and objectprocessorqueue, objecttype is now + # an integer (it was a human-friendly string previously) + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 6: + logger.debug( + 'In messages.dat database, dropping and recreating' + ' the inventory table.') + self.cur.execute('''DROP TABLE inventory''') + self.cur.execute( + '''CREATE TABLE inventory''' + ''' (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,''' + ''' tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') + self.cur.execute('''DROP TABLE objectprocessorqueue''') + self.cur.execute( + '''CREATE TABLE objectprocessorqueue''' + ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') + item = '''update settings set value=? WHERE key='version';''' + parameters = (7,) + self.cur.execute(item, parameters) + logger.debug( + 'Finished dropping and recreating the inventory table.') + + # The format of data stored in the pubkeys table has changed. Let's + # clear it, and the pubkeys from inventory, so that they'll + # be re-downloaded. + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 7: + logger.debug( + 'In messages.dat database, clearing pubkeys table' + ' because the data format has been updated.') + self.cur.execute( + '''delete from inventory where objecttype = 1;''') + self.cur.execute( + '''delete from pubkeys;''') + # Any sending messages for which we *thought* that we had + # the pubkey must be rechecked. 
+ self.cur.execute( + '''UPDATE sent SET status='msgqueued' WHERE status='doingmsgpow' or status='badkey';''') + query = '''update settings set value=? WHERE key='version';''' + parameters = (8,) + self.cur.execute(query, parameters) + logger.debug('Finished clearing currently held pubkeys.') + + # Add a new column to the inbox table to store the hash of + # the message signature. We'll use this as temporary message UUID + # in order to detect duplicates. + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 8: + logger.debug( + 'In messages.dat database, adding sighash field to' + ' the inbox table.') + item = '''ALTER TABLE inbox ADD sighash blob DEFAULT '' ''' + parameters = '' + self.cur.execute(item, parameters) + item = '''update settings set value=? WHERE key='version';''' + parameters = (9,) + self.cur.execute(item, parameters) + + # We'll also need a `sleeptill` field and a `ttl` field. Also we + # can combine the pubkeyretrynumber and msgretrynumber into one. 
+ + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 9: + logger.info( + 'In messages.dat database, making TTL-related changes:' + ' combining the pubkeyretrynumber and msgretrynumber' + ' fields into the retrynumber field and adding the' + ' sleeptill and ttl fields...') + self.cur.execute( + '''CREATE TEMPORARY TABLE sent_backup''' + ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' + ''' ackdata blob, lastactiontime integer, status text, retrynumber integer,''' + ''' folder text, encodingtype int)''') + self.cur.execute( + '''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,''' + ''' subject, message, ackdata, lastactiontime,''' + ''' status, 0, folder, encodingtype FROM sent;''') + self.cur.execute('''DROP TABLE sent''') + self.cur.execute( + '''CREATE TABLE sent''' + ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' + ''' ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,''' + ''' retrynumber integer, folder text, encodingtype int, ttl int)''') + self.cur.execute( + '''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,''' + ''' lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''') + self.cur.execute('''DROP TABLE sent_backup''') + logger.info('In messages.dat database, finished making TTL-related changes.') + logger.debug('In messages.dat database, adding address field to the pubkeys table.') + # We're going to have to calculate the address for each row in the pubkeys + # table. Then we can take out the hash field. 
+ self.cur.execute('''ALTER TABLE pubkeys ADD address text DEFAULT '' ;''') + + # replica for loop to update hashed address + self.cur.execute('''UPDATE pubkeys SET address=(enaddr(pubkeys.addressversion, 1, hash)); ''') + + # Now we can remove the hash field from the pubkeys table. + self.cur.execute( + '''CREATE TEMPORARY TABLE pubkeys_backup''' + ''' (address text, addressversion int, transmitdata blob, time int,''' + ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') + self.cur.execute( + '''INSERT INTO pubkeys_backup''' + ''' SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''') + self.cur.execute('''DROP TABLE pubkeys''') + self.cur.execute( + '''CREATE TABLE pubkeys''' + ''' (address text, addressversion int, transmitdata blob, time int, usedpersonally text,''' + ''' UNIQUE(address) ON CONFLICT REPLACE)''') + self.cur.execute( + '''INSERT INTO pubkeys SELECT''' + ''' address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''') + self.cur.execute('''DROP TABLE pubkeys_backup''') + logger.debug( + 'In messages.dat database, done adding address field to the pubkeys table' + ' and removing the hash field.') + self.cur.execute('''update settings set value=10 WHERE key='version';''') + + # Update the address colunm to unique in addressbook table + item = '''SELECT value FROM settings WHERE key='version';''' + parameters = '' + self.cur.execute(item, parameters) + currentVersion = int(self.cur.fetchall()[0][0]) + if currentVersion == 10: + logger.debug( + 'In messages.dat database, updating address column to UNIQUE' + ' in the addressbook table.') + self.cur.execute( + '''ALTER TABLE addressbook RENAME TO old_addressbook''') + self.cur.execute( + '''CREATE TABLE addressbook''' + ''' (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''') + self.cur.execute( + '''INSERT INTO addressbook SELECT label, address FROM old_addressbook;''') + self.cur.execute('''DROP TABLE old_addressbook''') 
+ self.cur.execute('''update settings set value=11 WHERE key='version';''') + + # Are you hoping to add a new option to the keys.dat file of existing + # Bitmessage users or modify the SQLite database? Add it right + # above this line! + + try: + testpayload = '\x00\x00' + t = ('1234', 1, testpayload, '12345678', 'no') + self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t) + self.conn.commit() + self.cur.execute( + '''SELECT transmitdata FROM pubkeys WHERE address='1234' ''') + queryreturn = self.cur.fetchall() + for row in queryreturn: + transmitdata, = row + self.cur.execute('''DELETE FROM pubkeys WHERE address='1234' ''') + self.conn.commit() + if transmitdata == '': + logger.fatal( + 'Problem: The version of SQLite you have cannot store Null values.' + ' Please download and install the latest revision of your version of Python' + ' (for example, the latest Python 2.7 revision) and try again.\n') + logger.fatal( + 'PyBitmessage will now exit very abruptly.' + ' You may now see threading errors related to this abrupt exit' + ' but the problem you need to solve is related to SQLite.\n\n') + os._exit(0) + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(While null value test) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + else: + logger.error(err) + + # Let us check to see the last time we vaccumed the messages.dat file. + # If it has been more than a month let's do it now. 
+ item = '''SELECT value FROM settings WHERE key='lastvacuumtime';''' + parameters = '' + self.cur.execute(item, parameters) + queryreturn = self.cur.fetchall() + for row in queryreturn: + value, = row + if int(value) < int(time.time()) - 86400: + logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...') + try: + self.cur.execute(''' VACUUM ''') + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(While VACUUM) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + item = '''update settings set value=? WHERE key='lastvacuumtime';''' + parameters = (int(time.time()),) + self.cur.execute(item, parameters) + + helper_sql.sql_ready.set() + + while True: + item = helper_sql.sqlSubmitQueue.get() + if item == 'commit': + try: + self.conn.commit() + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(While committing) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + elif item == 'exit': + self.conn.close() + logger.info('sqlThread exiting gracefully.') + + return + elif item == 'movemessagstoprog': + logger.debug('the sqlThread is moving the messages.dat file to the local program directory.') + + try: + self.conn.commit() + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(while movemessagstoprog) Alert: Your disk or data storage volume is full.' 
+ ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + self.conn.close() + shutil.move( + paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat') + self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat') + self.conn.text_factory = str + self.cur = self.conn.cursor() + elif item == 'movemessagstoappdata': + logger.debug('the sqlThread is moving the messages.dat file to the Appdata folder.') + + try: + self.conn.commit() + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(while movemessagstoappdata) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + self.conn.close() + shutil.move( + paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat') + self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat') + self.conn.text_factory = str + self.cur = self.conn.cursor() + elif item == 'deleteandvacuume': + self.cur.execute('''delete from inbox where folder='trash' ''') + self.cur.execute('''delete from sent where folder='trash' ''') + self.conn.commit() + try: + self.cur.execute(''' VACUUM ''') + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(while deleteandvacuume) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. 
Bitmessage will now exit.'), + True))) + os._exit(0) + else: + parameters = helper_sql.sqlSubmitQueue.get() + rowcount = 0 + try: + self.cur.execute(item, parameters) + rowcount = self.cur.rowcount + except Exception as err: + if str(err) == 'database or disk is full': + logger.fatal( + '(while cur.execute) Alert: Your disk or data storage volume is full.' + ' sqlThread will now exit.') + queues.UISignalQueue.put(( + 'alert', ( + _translate( + "MainWindow", + "Disk full"), + _translate( + "MainWindow", + 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), + True))) + os._exit(0) + else: + logger.fatal( + 'Major error occurred when trying to execute a SQL statement within the sqlThread.' + ' Please tell Atheros about this error message or post it in the forum!' + ' Error occurred while trying to execute statement: "%s" Here are the parameters;' + ' you might want to censor this data with asterisks (***)' + ' as it can contain private information: %s.' + ' Here is the actual error message thrown by the sqlThread: %s', + str(item), + str(repr(parameters)), + str(err)) + logger.fatal('This program shall now abruptly exit!') + + os._exit(0) + + helper_sql.sqlReturnQueue.put((self.cur.fetchall(), rowcount)) + # helper_sql.sqlSubmitQueue.task_done() + + def create_function(self): + # create_function + try: + self.conn.create_function("enaddr", 3, func=encodeAddress, deterministic=True) + except (TypeError, sqlite3.NotSupportedError) as err: + logger.debug( + "Got error while pass deterministic in sqlite create function {}, Passing 3 params".format(err)) + self.conn.create_function("enaddr", 3, encodeAddress) diff --git a/src/tests/mock/pybitmessage/debug.py b/src/tests/mock/pybitmessage/debug.py new file mode 100644 index 00000000..a70cb543 --- /dev/null +++ b/src/tests/mock/pybitmessage/debug.py @@ -0,0 +1,157 @@ +""" +Logging and debuging facility +----------------------------- + +Levels: + + DEBUG + Detailed information, typically of interest 
only when diagnosing problems. + INFO + Confirmation that things are working as expected. + WARNING + An indication that something unexpected happened, or indicative of + some problem in the near future (e.g. 'disk space low'). The software + is still working as expected. + ERROR + Due to a more serious problem, the software has not been able to + perform some function. + CRITICAL + A serious error, indicating that the program itself may be unable to + continue running. + +There are three loggers by default: `console_only`, `file_only` and `both`. +You can configure logging in the logging.dat in the appdata dir. +It's format is described in the :func:`logging.config.fileConfig` doc. + +Use: + +>>> import logging +>>> logger = logging.getLogger('default') + +The old form: ``from debug import logger`` is also may be used, +but only in the top level modules. + +Logging is thread-safe so you don't have to worry about locks, +just import and log. +""" + +import logging +import logging.config +import os +import sys + +from six.moves import configparser + +import helper_startup +import state + +helper_startup.loadConfig() + +# Now can be overriden from a config file, which uses standard python +# logging.config.fileConfig interface +# examples are here: +# https://bitmessage.org/forum/index.php/topic,4820.msg11163.html#msg11163 +log_level = 'WARNING' + + +def log_uncaught_exceptions(ex_cls, ex, tb): + """The last resort logging function used for sys.excepthook""" + logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb)) + + +def configureLogging(): + """ + Configure logging, + using either logging.dat file in the state.appdata dir + or dictionary with hardcoded settings. 
+ """ + sys.excepthook = log_uncaught_exceptions + fail_msg = '' + try: + logging_config = os.path.join(state.appdata, 'logging.dat') + logging.config.fileConfig( + logging_config, disable_existing_loggers=False) + return ( + False, + 'Loaded logger configuration from %s' % logging_config + ) + except (OSError, configparser.NoSectionError, KeyError): + if os.path.isfile(logging_config): + fail_msg = \ + 'Failed to load logger configuration from %s, using default' \ + ' logging config\n%s' % \ + (logging_config, sys.exc_info()) + else: + # no need to confuse the user if the logger config + # is missing entirely + fail_msg = 'Using default logger configuration' + + logging_config = { + 'version': 1, + 'formatters': { + 'default': { + 'format': u'%(asctime)s - %(levelname)s - %(message)s', + }, + }, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'default', + 'level': log_level, + 'stream': 'ext://sys.stderr' + }, + 'file': { + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'level': log_level, + 'filename': os.path.join(state.appdata, 'debug.log'), + 'maxBytes': 2097152, # 2 MiB + 'backupCount': 1, + 'encoding': 'UTF-8', + } + }, + 'loggers': { + 'console_only': { + 'handlers': ['console'], + 'propagate': 0 + }, + 'file_only': { + 'handlers': ['file'], + 'propagate': 0 + }, + 'both': { + 'handlers': ['console', 'file'], + 'propagate': 0 + }, + }, + 'root': { + 'level': log_level, + 'handlers': ['console'], + }, + } + + logging_config['loggers']['default'] = logging_config['loggers'][ + 'file_only' if '-c' in sys.argv else 'both'] + logging.config.dictConfig(logging_config) + + return True, fail_msg + + +def resetLogging(): + """Reconfigure logging in runtime when state.appdata dir changed""" + # pylint: disable=global-statement, used-before-assignment + global logger + for i in logger.handlers: + logger.removeHandler(i) + i.flush() + i.close() + configureLogging() + logger = logging.getLogger('default') + 
+ +# ! + +preconfigured, msg = configureLogging() +logger = logging.getLogger('default') +if msg: + logger.log(logging.WARNING if preconfigured else logging.INFO, msg) diff --git a/src/tests/mock/pybitmessage/defaults.py b/src/tests/mock/pybitmessage/defaults.py new file mode 100644 index 00000000..32162b56 --- /dev/null +++ b/src/tests/mock/pybitmessage/defaults.py @@ -0,0 +1,24 @@ +""" +Common default values +""" + +#: sanity check, prevent doing ridiculous PoW +#: 20 million PoWs equals approximately 2 days on dev's dual R9 290 +ridiculousDifficulty = 20000000 + +#: Remember here the RPC port read from namecoin.conf so we can restore to +#: it as default whenever the user changes the "method" selection for +#: namecoin integration to "namecoind". +namecoinDefaultRpcPort = "8336" + +# If changed, these values will cause particularly unexpected behavior: +# You won't be able to either send or receive messages because the proof +# of work you do (or demand) won't match that done or demanded by others. +# Don't change them! +#: The amount of work that should be performed (and demanded) per byte +#: of the payload. +networkDefaultProofOfWorkNonceTrialsPerByte = 1000 +#: To make sending short messages a little more difficult, this value is +#: added to the payload length for use in calculating the proof of work +#: target. +networkDefaultPayloadLengthExtraBytes = 1000 diff --git a/src/tests/mock/pybitmessage/depends.py b/src/tests/mock/pybitmessage/depends.py new file mode 100755 index 00000000..268137ec --- /dev/null +++ b/src/tests/mock/pybitmessage/depends.py @@ -0,0 +1,450 @@ +""" +Utility functions to check the availability of dependencies +and suggest how it may be installed +""" + +import os +import re +import sys + +# Only really old versions of Python don't have sys.hexversion. We don't +# support them. 
The logging module was introduced in Python 2.3 +if not hasattr(sys, 'hexversion') or sys.hexversion < 0x20300F0: + sys.exit( + 'Python version: %s\n' + 'PyBitmessage requires Python 2.7.4 or greater (but not Python 3)' + % sys.version + ) + +import logging # noqa:E402 +import subprocess + +from importlib import import_module + +# We can now use logging so set up a simple configuration +formatter = logging.Formatter('%(levelname)s: %(message)s') +handler = logging.StreamHandler(sys.stdout) +handler.setFormatter(formatter) +logger = logging.getLogger('both') +logger.addHandler(handler) +logger.setLevel(logging.ERROR) + + +OS_RELEASE = { + "Debian GNU/Linux".lower(): "Debian", + "fedora": "Fedora", + "opensuse": "openSUSE", + "ubuntu": "Ubuntu", + "gentoo": "Gentoo", + "calculate": "Gentoo" +} + +PACKAGE_MANAGER = { + "OpenBSD": "pkg_add", + "FreeBSD": "pkg install", + "Debian": "apt-get install", + "Ubuntu": "apt-get install", + "Ubuntu 12": "apt-get install", + "openSUSE": "zypper install", + "Fedora": "dnf install", + "Guix": "guix package -i", + "Gentoo": "emerge" +} + +PACKAGES = { + "PyQt4": { + "OpenBSD": "py-qt4", + "FreeBSD": "py27-qt4", + "Debian": "python-qt4", + "Ubuntu": "python-qt4", + "Ubuntu 12": "python-qt4", + "openSUSE": "python-qt", + "Fedora": "PyQt4", + "Guix": "python2-pyqt@4.11.4", + "Gentoo": "dev-python/PyQt4", + "optional": True, + "description": + "You only need PyQt if you want to use the GUI." + " When only running as a daemon, this can be skipped.\n" + "However, you would have to install it manually" + " because setuptools does not support PyQt." 
+ }, + "msgpack": { + "OpenBSD": "py-msgpack", + "FreeBSD": "py27-msgpack-python", + "Debian": "python-msgpack", + "Ubuntu": "python-msgpack", + "Ubuntu 12": "msgpack-python", + "openSUSE": "python-msgpack-python", + "Fedora": "python2-msgpack", + "Guix": "python2-msgpack", + "Gentoo": "dev-python/msgpack", + "optional": True, + "description": + "python-msgpack is recommended for improved performance of" + " message encoding/decoding" + }, + "pyopencl": { + "FreeBSD": "py27-pyopencl", + "Debian": "python-pyopencl", + "Ubuntu": "python-pyopencl", + "Ubuntu 12": "python-pyopencl", + "Fedora": "python2-pyopencl", + "openSUSE": "", + "OpenBSD": "", + "Guix": "", + "Gentoo": "dev-python/pyopencl", + "optional": True, + "description": + "If you install pyopencl, you will be able to use" + " GPU acceleration for proof of work.\n" + "You also need a compatible GPU and drivers." + }, + "setuptools": { + "OpenBSD": "py-setuptools", + "FreeBSD": "py27-setuptools", + "Debian": "python-setuptools", + "Ubuntu": "python-setuptools", + "Ubuntu 12": "python-setuptools", + "Fedora": "python2-setuptools", + "openSUSE": "python-setuptools", + "Guix": "python2-setuptools", + "Gentoo": "dev-python/setuptools", + "optional": False, + } +} + + +def detectOS(): + """Finding out what Operating System is running""" + if detectOS.result is not None: + return detectOS.result + if sys.platform.startswith('openbsd'): + detectOS.result = "OpenBSD" + elif sys.platform.startswith('freebsd'): + detectOS.result = "FreeBSD" + elif sys.platform.startswith('win'): + detectOS.result = "Windows" + elif os.path.isfile("/etc/os-release"): + detectOSRelease() + elif os.path.isfile("/etc/config.scm"): + detectOS.result = "Guix" + return detectOS.result + + +detectOS.result = None + + +def detectOSRelease(): + """Detecting the release of OS""" + with open("/etc/os-release", 'r') as osRelease: + version = None + for line in osRelease: + if line.startswith("NAME="): + detectOS.result = OS_RELEASE.get( + 
line.replace('"', '').split("=")[-1].strip().lower()) + elif line.startswith("VERSION_ID="): + try: + version = float(line.split("=")[1].replace("\"", "")) + except ValueError: + pass + if detectOS.result == "Ubuntu" and version < 14: + detectOS.result = "Ubuntu 12" + + +def try_import(module, log_extra=False): + """Try to import the non imported packages""" + try: + return import_module(module) + except ImportError: + module = module.split('.')[0] + logger.error('The %s module is not available.', module) + if log_extra: + logger.error(log_extra) + dist = detectOS() + logger.error( + 'On %s, try running "%s %s" as root.', + dist, PACKAGE_MANAGER[dist], PACKAGES[module][dist]) + return False + + +def check_ripemd160(): + """Check availability of the RIPEMD160 hash function""" + try: + from fallback import RIPEMD160Hash # pylint: disable=relative-import + except ImportError: + return False + return RIPEMD160Hash is not None + + +def check_sqlite(): + """Do sqlite check. + + Simply check sqlite3 module if exist or not with hexversion + support in python version for specifieed platform. 
+ """ + if sys.hexversion < 0x020500F0: + logger.error( + 'The sqlite3 module is not included in this version of Python.') + if sys.platform.startswith('freebsd'): + logger.error( + 'On FreeBSD, try running "pkg install py27-sqlite3" as root.') + return False + + sqlite3 = try_import('sqlite3') + if not sqlite3: + return False + + logger.info('sqlite3 Module Version: %s', sqlite3.version) + logger.info('SQLite Library Version: %s', sqlite3.sqlite_version) + # sqlite_version_number formula: https://sqlite.org/c3ref/c_source_id.html + sqlite_version_number = ( + sqlite3.sqlite_version_info[0] * 1000000 + + sqlite3.sqlite_version_info[1] * 1000 + + sqlite3.sqlite_version_info[2] + ) + + conn = None + try: + try: + conn = sqlite3.connect(':memory:') + if sqlite_version_number >= 3006018: + sqlite_source_id = conn.execute( + 'SELECT sqlite_source_id();' + ).fetchone()[0] + logger.info('SQLite Library Source ID: %s', sqlite_source_id) + if sqlite_version_number >= 3006023: + compile_options = ', '.join( + [row[0] for row in conn.execute('PRAGMA compile_options;')]) + logger.info( + 'SQLite Library Compile Options: %s', compile_options) + # There is no specific version requirement as yet, so we just + # use the first version that was included with Python. + if sqlite_version_number < 3000008: + logger.error( + 'This version of SQLite is too old.' + ' PyBitmessage requires SQLite 3.0.8 or later') + return False + return True + except sqlite3.Error: + logger.exception('An exception occured while checking sqlite.') + return False + finally: + if conn: + conn.close() + + +def check_openssl(): + """Do openssl dependency check. + + Here we are checking for openssl with its all dependent libraries + and version checking. 
+ """ + # pylint: disable=too-many-branches, too-many-return-statements + # pylint: disable=protected-access, redefined-outer-name + ctypes = try_import('ctypes') + if not ctypes: + logger.error('Unable to check OpenSSL.') + return False + + # We need to emulate the way PyElliptic searches for OpenSSL. + if sys.platform == 'win32': + paths = ['libeay32.dll'] + if getattr(sys, 'frozen', False): + paths.insert(0, os.path.join(sys._MEIPASS, 'libeay32.dll')) + else: + paths = ['libcrypto.so', 'libcrypto.so.1.0.0'] + if sys.platform == 'darwin': + paths.extend([ + 'libcrypto.dylib', + '/usr/local/opt/openssl/lib/libcrypto.dylib', + './../Frameworks/libcrypto.dylib' + ]) + + if re.match(r'linux|darwin|freebsd', sys.platform): + try: + import ctypes.util + path = ctypes.util.find_library('ssl') + if path not in paths: + paths.append(path) + except: # noqa:E722 + pass + + openssl_version = None + openssl_hexversion = None + openssl_cflags = None + + cflags_regex = re.compile(r'(?:OPENSSL_NO_)(AES|EC|ECDH|ECDSA)(?!\w)') + + import pyelliptic.openssl + + for path in paths: + logger.info('Checking OpenSSL at %s', path) + try: + library = ctypes.CDLL(path) + except OSError: + continue + logger.info('OpenSSL Name: %s', library._name) + try: + openssl_version, openssl_hexversion, openssl_cflags = \ + pyelliptic.openssl.get_version(library) + except AttributeError: # sphinx chokes + return True + if not openssl_version: + logger.error('Cannot determine version of this OpenSSL library.') + return False + logger.info('OpenSSL Version: %s', openssl_version) + logger.info('OpenSSL Compile Options: %s', openssl_cflags) + # PyElliptic uses EVP_CIPHER_CTX_new and EVP_CIPHER_CTX_free which were + # introduced in 0.9.8b. + if openssl_hexversion < 0x90802F: + logger.error( + 'This OpenSSL library is too old. 
PyBitmessage requires' + ' OpenSSL 0.9.8b or later with AES, Elliptic Curves (EC),' + ' ECDH, and ECDSA enabled.') + return False + matches = cflags_regex.findall(openssl_cflags.decode('utf-8', "ignore")) + if matches: + logger.error( + 'This OpenSSL library is missing the following required' + ' features: %s. PyBitmessage requires OpenSSL 0.9.8b' + ' or later with AES, Elliptic Curves (EC), ECDH,' + ' and ECDSA enabled.', ', '.join(matches)) + return False + return True + return False + + +# ..todo:: The minimum versions of pythondialog and dialog need to be determined +def check_curses(): + """Do curses dependency check. + + Here we are checking for curses if available or not with check as interface + requires the `pythondialog `_ package + and the dialog utility. + """ + if sys.hexversion < 0x20600F0: + logger.error( + 'The curses interface requires the pythondialog package and' + ' the dialog utility.') + return False + curses = try_import('curses') + if not curses: + logger.error('The curses interface can not be used.') + return False + + logger.info('curses Module Version: %s', curses.version) + + dialog = try_import('dialog') + if not dialog: + logger.error('The curses interface can not be used.') + return False + + try: + subprocess.check_call(['which', 'dialog']) + except subprocess.CalledProcessError: + logger.error( + 'Curses requires the `dialog` command to be installed as well as' + ' the python library.') + return False + + logger.info('pythondialog Package Version: %s', dialog.__version__) + dialog_util_version = dialog.Dialog().cached_backend_version + # The pythondialog author does not like Python2 str, so we have to use + # unicode for just the version otherwise we get the repr form which + # includes the module and class names along with the actual version. + logger.info('dialog Utility Version %s', dialog_util_version.decode('utf-8')) + return True + + +def check_pyqt(): + """Do pyqt dependency check. 
+ + Here we are checking for PyQt4 with its version, as for it require + PyQt 4.8 or later. + """ + QtCore = try_import( + 'PyQt4.QtCore', 'PyBitmessage requires PyQt 4.8 or later and Qt 4.7 or later.') + + if not QtCore: + return False + + logger.info('PyQt Version: %s', QtCore.PYQT_VERSION_STR) + logger.info('Qt Version: %s', QtCore.QT_VERSION_STR) + passed = True + if QtCore.PYQT_VERSION < 0x40800: + logger.error( + 'This version of PyQt is too old. PyBitmessage requries' + ' PyQt 4.8 or later.') + passed = False + if QtCore.QT_VERSION < 0x40700: + logger.error( + 'This version of Qt is too old. PyBitmessage requries' + ' Qt 4.7 or later.') + passed = False + return passed + + +def check_msgpack(): + """Do sgpack module check. + + simply checking if msgpack package with all its dependency + is available or not as recommended for messages coding. + """ + return try_import( + 'msgpack', 'It is highly recommended for messages coding.') is not False + + +def check_dependencies(verbose=False, optional=False): + """Do dependency check. + + It identifies project dependencies and checks if there are + any known, publicly disclosed, vulnerabilities.basically + scan applications (and their dependent libraries) so that + easily identify any known vulnerable components. + """ + if verbose: + logger.setLevel(logging.INFO) + + has_all_dependencies = True + + # Python 2.7.4 is the required minimum. + # (https://bitmessage.org/forum/index.php?topic=4081.0) + # Python 3+ is not supported, but it is still useful to provide + # information about our other requirements. + logger.info('Python version: %s', sys.version) + if sys.hexversion < 0x20704F0: + logger.error( + 'PyBitmessage requires Python 2.7.4 or greater' + ' (but not Python 3+)') + has_all_dependencies = False + if sys.hexversion >= 0x3000000: + logger.error( + 'PyBitmessage does not support Python 3+. Python 2.7.4' + ' or greater is required. 
Python 2.7.18 is recommended.') + sys.exit() + + # FIXME: This needs to be uncommented when more of the code is python3 compatible + # if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3060000: + # print("PyBitmessage requires python >= 3.6 if using python 3") + + check_functions = [check_ripemd160, check_sqlite, check_openssl] + if optional: + check_functions.extend([check_msgpack, check_pyqt, check_curses]) + + # Unexpected exceptions are handled here + for check in check_functions: + try: + has_all_dependencies &= check() + except: # noqa:E722 + logger.exception('%s failed unexpectedly.', check.__name__) + has_all_dependencies = False + + if not has_all_dependencies: + sys.exit( + 'PyBitmessage cannot start. One or more dependencies are' + ' unavailable.' + ) + + +logger.setLevel(0) diff --git a/src/tests/mock/pybitmessage/fallback/__init__.py b/src/tests/mock/pybitmessage/fallback/__init__.py new file mode 100644 index 00000000..9a8d646f --- /dev/null +++ b/src/tests/mock/pybitmessage/fallback/__init__.py @@ -0,0 +1,32 @@ +""" +Fallback expressions help PyBitmessage modules to run without some external +dependencies. + + +RIPEMD160Hash +------------- + +We need to check :mod:`hashlib` for RIPEMD-160, as it won't be available +if OpenSSL is not linked against or the linked OpenSSL has RIPEMD disabled. +Try to use `pycryptodome `_ +in that case. 
+""" + +import hashlib + +try: + hashlib.new('ripemd160') +except ValueError: + try: + from Crypto.Hash import RIPEMD + except ImportError: + RIPEMD160Hash = None + else: + RIPEMD160Hash = RIPEMD.RIPEMD160Hash +else: + def RIPEMD160Hash(data=None): + """hashlib based RIPEMD160Hash""" + hasher = hashlib.new('ripemd160') + if data: + hasher.update(data) + return hasher diff --git a/src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py b/src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py b/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py new file mode 100644 index 00000000..34938614 --- /dev/null +++ b/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py @@ -0,0 +1,1067 @@ +# u-msgpack-python v2.4.1 - v at sergeev.io +# https://github.com/vsergeev/u-msgpack-python +# +# u-msgpack-python is a lightweight MessagePack serializer and deserializer +# module, compatible with both Python 2 and 3, as well CPython and PyPy +# implementations of Python. u-msgpack-python is fully compliant with the +# latest MessagePack specification.com/msgpack/msgpack/blob/master/spec.md). In +# particular, it supports the new binary, UTF-8 string, and application ext +# types. +# +# MIT License +# +# Copyright (c) 2013-2016 vsergeev / Ivan (Vanya) A. Sergeev +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +""" +src/fallback/umsgpack/umsgpack.py +================================= + +u-msgpack-python v2.4.1 - v at sergeev.io +https://github.com/vsergeev/u-msgpack-python + +u-msgpack-python is a lightweight MessagePack serializer and deserializer +module, compatible with both Python 2 and 3, as well CPython and PyPy +implementations of Python. u-msgpack-python is fully compliant with the +latest MessagePack specification.com/msgpack/msgpack/blob/master/spec.md). In +particular, it supports the new binary, UTF-8 string, and application ext +types. + +License: MIT +""" +# pylint: disable=too-many-lines,too-many-branches,too-many-statements,global-statement,too-many-return-statements +# pylint: disable=unused-argument + +import collections +import io +import struct +import sys + +__version__ = "2.4.1" +"Module version string" + +version = (2, 4, 1) +"Module version tuple" + + +############################################################################## +# Ext Class +############################################################################## + +# Extension type for application-defined types and data +class Ext: # pylint: disable=old-style-class + """ + The Ext class facilitates creating a serializable extension object to store + an application-defined type and data byte array. + """ + + def __init__(self, type, data): + """ + Construct a new Ext object. 
+ + Args: + type: application-defined type integer from 0 to 127 + data: application-defined data byte array + + Raises: + TypeError: + Specified ext type is outside of 0 to 127 range. + + Example: + >>> foo = umsgpack.Ext(0x05, b"\x01\x02\x03") + >>> umsgpack.packb({u"special stuff": foo, u"awesome": True}) + '\x82\xa7awesome\xc3\xadspecial stuff\xc7\x03\x05\x01\x02\x03' + >>> bar = umsgpack.unpackb(_) + >>> print(bar["special stuff"]) + Ext Object (Type: 0x05, Data: 01 02 03) + >>> + """ + # pylint:disable=redefined-builtin + + # Application ext type should be 0 <= type <= 127 + if not isinstance(type, int) or not (type >= 0 and type <= 127): + raise TypeError("ext type out of range") + # Check data is type bytes + elif sys.version_info[0] == 3 and not isinstance(data, bytes): + raise TypeError("ext data is not type \'bytes\'") + elif sys.version_info[0] == 2 and not isinstance(data, str): + raise TypeError("ext data is not type \'str\'") + self.type = type + self.data = data + + def __eq__(self, other): + """ + Compare this Ext object with another for equality. + """ + return (isinstance(other, self.__class__) and + self.type == other.type and + self.data == other.data) + + def __ne__(self, other): + """ + Compare this Ext object with another for inequality. + """ + return not self.__eq__(other) + + def __str__(self): + """ + String representation of this Ext object. + """ + s = "Ext Object (Type: 0x%02x, Data: " % self.type + s += " ".join(["0x%02x" % ord(self.data[i:i + 1]) + for i in xrange(min(len(self.data), 8))]) + if len(self.data) > 8: + s += " ..." + s += ")" + return s + + def __hash__(self): + """ + Provide a hash of this Ext object. 
+ """ + return hash((self.type, self.data)) + + +class InvalidString(bytes): + """Subclass of bytes to hold invalid UTF-8 strings.""" + pass + +############################################################################## +# Exceptions +############################################################################## + + +# Base Exception classes +class PackException(Exception): + "Base class for exceptions encountered during packing." + pass + + +class UnpackException(Exception): + "Base class for exceptions encountered during unpacking." + pass + + +# Packing error +class UnsupportedTypeException(PackException): + "Object type not supported for packing." + pass + + +# Unpacking error +class InsufficientDataException(UnpackException): + "Insufficient data to unpack the serialized object." + pass + + +class InvalidStringException(UnpackException): + "Invalid UTF-8 string encountered during unpacking." + pass + + +class ReservedCodeException(UnpackException): + "Reserved code encountered during unpacking." + pass + + +class UnhashableKeyException(UnpackException): + """ + Unhashable key encountered during map unpacking. + The serialized map cannot be deserialized into a Python dictionary. + """ + pass + + +class DuplicateKeyException(UnpackException): + "Duplicate key encountered during map unpacking." + pass + + +# Backwards compatibility +KeyNotPrimitiveException = UnhashableKeyException +KeyDuplicateException = DuplicateKeyException + +############################################################################# +# Exported Functions and Glob +############################################################################# + +# Exported functions and variables, set up in __init() +pack = None +packb = None +unpack = None +unpackb = None +dump = None +dumps = None +load = None +loads = None + +compatibility = False +u""" +Compatibility mode boolean. 
+ +When compatibility mode is enabled, u-msgpack-python will serialize both +unicode strings and bytes into the old "raw" msgpack type, and deserialize the +"raw" msgpack type into bytes. This provides backwards compatibility with the +old MessagePack specification. + +Example: +>>> umsgpack.compatibility = True +>>> +>>> umsgpack.packb([u"some string", b"some bytes"]) +b'\x92\xabsome string\xaasome bytes' +>>> umsgpack.unpackb(_) +[b'some string', b'some bytes'] +>>> +""" + +############################################################################## +# Packing +############################################################################## + +# You may notice struct.pack("B", obj) instead of the simpler chr(obj) in the +# code below. This is to allow for seamless Python 2 and 3 compatibility, as +# chr(obj) has a str return type instead of bytes in Python 3, and +# struct.pack(...) has the right return type in both versions. + + +def _pack_integer(obj, fp, options): + if obj < 0: + if obj >= -32: + fp.write(struct.pack("b", obj)) + elif obj >= -2**(8 - 1): + fp.write(b"\xd0" + struct.pack("b", obj)) + elif obj >= -2**(16 - 1): + fp.write(b"\xd1" + struct.pack(">h", obj)) + elif obj >= -2**(32 - 1): + fp.write(b"\xd2" + struct.pack(">i", obj)) + elif obj >= -2**(64 - 1): + fp.write(b"\xd3" + struct.pack(">q", obj)) + else: + raise UnsupportedTypeException("huge signed int") + else: + if obj <= 127: + fp.write(struct.pack("B", obj)) + elif obj <= 2**8 - 1: + fp.write(b"\xcc" + struct.pack("B", obj)) + elif obj <= 2**16 - 1: + fp.write(b"\xcd" + struct.pack(">H", obj)) + elif obj <= 2**32 - 1: + fp.write(b"\xce" + struct.pack(">I", obj)) + elif obj <= 2**64 - 1: + fp.write(b"\xcf" + struct.pack(">Q", obj)) + else: + raise UnsupportedTypeException("huge unsigned int") + + +def _pack_nil(obj, fp, options): + fp.write(b"\xc0") + + +def _pack_boolean(obj, fp, options): + fp.write(b"\xc3" if obj else b"\xc2") + + +def _pack_float(obj, fp, options): + float_precision = 
options.get('force_float_precision', _float_precision) + + if float_precision == "double": + fp.write(b"\xcb" + struct.pack(">d", obj)) + elif float_precision == "single": + fp.write(b"\xca" + struct.pack(">f", obj)) + else: + raise ValueError("invalid float precision") + + +def _pack_string(obj, fp, options): + obj = obj.encode('utf-8') + if len(obj) <= 31: + fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) + elif len(obj) <= 2**8 - 1: + fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj) + elif len(obj) <= 2**16 - 1: + fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj) + elif len(obj) <= 2**32 - 1: + fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj) + else: + raise UnsupportedTypeException("huge string") + + +def _pack_binary(obj, fp, options): + if len(obj) <= 2**8 - 1: + fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj) + elif len(obj) <= 2**16 - 1: + fp.write(b"\xc5" + struct.pack(">H", len(obj)) + obj) + elif len(obj) <= 2**32 - 1: + fp.write(b"\xc6" + struct.pack(">I", len(obj)) + obj) + else: + raise UnsupportedTypeException("huge binary string") + + +def _pack_oldspec_raw(obj, fp, options): + if len(obj) <= 31: + fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) + elif len(obj) <= 2**16 - 1: + fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj) + elif len(obj) <= 2**32 - 1: + fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj) + else: + raise UnsupportedTypeException("huge raw string") + + +def _pack_ext(obj, fp, options): + if len(obj.data) == 1: + fp.write(b"\xd4" + struct.pack("B", obj.type & 0xff) + obj.data) + elif len(obj.data) == 2: + fp.write(b"\xd5" + struct.pack("B", obj.type & 0xff) + obj.data) + elif len(obj.data) == 4: + fp.write(b"\xd6" + struct.pack("B", obj.type & 0xff) + obj.data) + elif len(obj.data) == 8: + fp.write(b"\xd7" + struct.pack("B", obj.type & 0xff) + obj.data) + elif len(obj.data) == 16: + fp.write(b"\xd8" + struct.pack("B", obj.type & 0xff) + obj.data) + elif len(obj.data) <= 2**8 - 1: + fp.write(b"\xc7" + + 
struct.pack("BB", len(obj.data), obj.type & 0xff) + obj.data) + elif len(obj.data) <= 2**16 - 1: + fp.write(b"\xc8" + + struct.pack(">HB", len(obj.data), obj.type & 0xff) + obj.data) + elif len(obj.data) <= 2**32 - 1: + fp.write(b"\xc9" + + struct.pack(">IB", len(obj.data), obj.type & 0xff) + obj.data) + else: + raise UnsupportedTypeException("huge ext data") + + +def _pack_array(obj, fp, options): + if len(obj) <= 15: + fp.write(struct.pack("B", 0x90 | len(obj))) + elif len(obj) <= 2**16 - 1: + fp.write(b"\xdc" + struct.pack(">H", len(obj))) + elif len(obj) <= 2**32 - 1: + fp.write(b"\xdd" + struct.pack(">I", len(obj))) + else: + raise UnsupportedTypeException("huge array") + + for e in obj: + pack(e, fp, **options) + + +def _pack_map(obj, fp, options): + if len(obj) <= 15: + fp.write(struct.pack("B", 0x80 | len(obj))) + elif len(obj) <= 2**16 - 1: + fp.write(b"\xde" + struct.pack(">H", len(obj))) + elif len(obj) <= 2**32 - 1: + fp.write(b"\xdf" + struct.pack(">I", len(obj))) + else: + raise UnsupportedTypeException("huge array") + + for k, v in obj.items(): + pack(k, fp, **options) + pack(v, fp, **options) + +######################################## + + +# Pack for Python 2, with 'unicode' type, 'str' type, and 'long' type +def _pack2(obj, fp, **options): + """ + Serialize a Python object into MessagePack bytes. + + Args: + obj: a Python object + fp: a .write()-supporting file-like object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping a custom type + to a callable that packs an instance of the type + into an Ext object + force_float_precision (str): "single" to force packing floats as + IEEE-754 single-precision floats, + "double" to force packing floats as + IEEE-754 double-precision floats. + + Returns: + None. + + Raises: + UnsupportedType(PackException): + Object type not supported for packing. 
+ + Example: + >>> f = open('test.bin', 'wb') + >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) + >>> + """ + global compatibility + + ext_handlers = options.get("ext_handlers") + + if obj is None: + _pack_nil(obj, fp, options) + elif ext_handlers and obj.__class__ in ext_handlers: + _pack_ext(ext_handlers[obj.__class__](obj), fp, options) + elif isinstance(obj, bool): + _pack_boolean(obj, fp, options) + elif isinstance(obj, (int, long)): + _pack_integer(obj, fp, options) + elif isinstance(obj, float): + _pack_float(obj, fp, options) + elif compatibility and isinstance(obj, unicode): + _pack_oldspec_raw(bytes(obj), fp, options) + elif compatibility and isinstance(obj, bytes): + _pack_oldspec_raw(obj, fp, options) + elif isinstance(obj, unicode): + _pack_string(obj, fp, options) + elif isinstance(obj, str): + _pack_binary(obj, fp, options) + elif isinstance(obj, (list, tuple)): + _pack_array(obj, fp, options) + elif isinstance(obj, dict): + _pack_map(obj, fp, options) + elif isinstance(obj, Ext): + _pack_ext(obj, fp, options) + elif ext_handlers: + # Linear search for superclass + t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) + if t: + _pack_ext(ext_handlers[t](obj), fp, options) + else: + raise UnsupportedTypeException( + "unsupported type: %s" % str(type(obj))) + else: + raise UnsupportedTypeException("unsupported type: %s" % str(type(obj))) + + +# Pack for Python 3, with unicode 'str' type, 'bytes' type, and no 'long' type +def _pack3(obj, fp, **options): + """ + Serialize a Python object into MessagePack bytes. + + Args: + obj: a Python object + fp: a .write()-supporting file-like object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping a custom type + to a callable that packs an instance of the type + into an Ext object + force_float_precision (str): "single" to force packing floats as + IEEE-754 single-precision floats, + "double" to force packing floats as + IEEE-754 double-precision floats. 
+ + Returns: + None. + + Raises: + UnsupportedType(PackException): + Object type not supported for packing. + + Example: + >>> f = open('test.bin', 'wb') + >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) + >>> + """ + global compatibility + + ext_handlers = options.get("ext_handlers") + + if obj is None: + _pack_nil(obj, fp, options) + elif ext_handlers and obj.__class__ in ext_handlers: + _pack_ext(ext_handlers[obj.__class__](obj), fp, options) + elif isinstance(obj, bool): + _pack_boolean(obj, fp, options) + elif isinstance(obj, int): + _pack_integer(obj, fp, options) + elif isinstance(obj, float): + _pack_float(obj, fp, options) + elif compatibility and isinstance(obj, str): + _pack_oldspec_raw(obj.encode('utf-8'), fp, options) + elif compatibility and isinstance(obj, bytes): + _pack_oldspec_raw(obj, fp, options) + elif isinstance(obj, str): + _pack_string(obj, fp, options) + elif isinstance(obj, bytes): + _pack_binary(obj, fp, options) + elif isinstance(obj, (list, tuple)): + _pack_array(obj, fp, options) + elif isinstance(obj, dict): + _pack_map(obj, fp, options) + elif isinstance(obj, Ext): + _pack_ext(obj, fp, options) + elif ext_handlers: + # Linear search for superclass + t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) + if t: + _pack_ext(ext_handlers[t](obj), fp, options) + else: + raise UnsupportedTypeException( + "unsupported type: %s" % str(type(obj))) + else: + raise UnsupportedTypeException( + "unsupported type: %s" % str(type(obj))) + + +def _packb2(obj, **options): + """ + Serialize a Python object into MessagePack bytes. + + Args: + obj: a Python object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping a custom type + to a callable that packs an instance of the type + into an Ext object + force_float_precision (str): "single" to force packing floats as + IEEE-754 single-precision floats, + "double" to force packing floats as + IEEE-754 double-precision floats. 
+ + Returns: + A 'str' containing serialized MessagePack bytes. + + Raises: + UnsupportedType(PackException): + Object type not supported for packing. + + Example: + >>> umsgpack.packb({u"compact": True, u"schema": 0}) + '\x82\xa7compact\xc3\xa6schema\x00' + >>> + """ + fp = io.BytesIO() + _pack2(obj, fp, **options) + return fp.getvalue() + + +def _packb3(obj, **options): + """ + Serialize a Python object into MessagePack bytes. + + Args: + obj: a Python object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping a custom type + to a callable that packs an instance of the type + into an Ext object + force_float_precision (str): "single" to force packing floats as + IEEE-754 single-precision floats, + "double" to force packing floats as + IEEE-754 double-precision floats. + + Returns: + A 'bytes' containing serialized MessagePack bytes. + + Raises: + UnsupportedType(PackException): + Object type not supported for packing. + + Example: + >>> umsgpack.packb({u"compact": True, u"schema": 0}) + b'\x82\xa7compact\xc3\xa6schema\x00' + >>> + """ + fp = io.BytesIO() + _pack3(obj, fp, **options) + return fp.getvalue() + +############################################################################# +# Unpacking +############################################################################# + + +def _read_except(fp, n): + data = fp.read(n) + if len(data) < n: + raise InsufficientDataException() + return data + + +def _unpack_integer(code, fp, options): + if (ord(code) & 0xe0) == 0xe0: + return struct.unpack("b", code)[0] + elif code == b'\xd0': + return struct.unpack("b", _read_except(fp, 1))[0] + elif code == b'\xd1': + return struct.unpack(">h", _read_except(fp, 2))[0] + elif code == b'\xd2': + return struct.unpack(">i", _read_except(fp, 4))[0] + elif code == b'\xd3': + return struct.unpack(">q", _read_except(fp, 8))[0] + elif (ord(code) & 0x80) == 0x00: + return struct.unpack("B", code)[0] + elif code == b'\xcc': + return struct.unpack("B", _read_except(fp, 
1))[0] + elif code == b'\xcd': + return struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xce': + return struct.unpack(">I", _read_except(fp, 4))[0] + elif code == b'\xcf': + return struct.unpack(">Q", _read_except(fp, 8))[0] + raise Exception("logic error, not int: 0x%02x" % ord(code)) + + +def _unpack_reserved(code, fp, options): + if code == b'\xc1': + raise ReservedCodeException( + "encountered reserved code: 0x%02x" % ord(code)) + raise Exception( + "logic error, not reserved code: 0x%02x" % ord(code)) + + +def _unpack_nil(code, fp, options): + if code == b'\xc0': + return None + raise Exception("logic error, not nil: 0x%02x" % ord(code)) + + +def _unpack_boolean(code, fp, options): + if code == b'\xc2': + return False + elif code == b'\xc3': + return True + raise Exception("logic error, not boolean: 0x%02x" % ord(code)) + + +def _unpack_float(code, fp, options): + if code == b'\xca': + return struct.unpack(">f", _read_except(fp, 4))[0] + elif code == b'\xcb': + return struct.unpack(">d", _read_except(fp, 8))[0] + raise Exception("logic error, not float: 0x%02x" % ord(code)) + + +def _unpack_string(code, fp, options): + if (ord(code) & 0xe0) == 0xa0: + length = ord(code) & ~0xe0 + elif code == b'\xd9': + length = struct.unpack("B", _read_except(fp, 1))[0] + elif code == b'\xda': + length = struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xdb': + length = struct.unpack(">I", _read_except(fp, 4))[0] + else: + raise Exception("logic error, not string: 0x%02x" % ord(code)) + + # Always return raw bytes in compatibility mode + global compatibility + if compatibility: + return _read_except(fp, length) + + data = _read_except(fp, length) + try: + return bytes.decode(data, 'utf-8') + except UnicodeDecodeError: + if options.get("allow_invalid_utf8"): + return InvalidString(data) + raise InvalidStringException("unpacked string is invalid utf-8") + + +def _unpack_binary(code, fp, options): + if code == b'\xc4': + length = struct.unpack("B", 
_read_except(fp, 1))[0] + elif code == b'\xc5': + length = struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xc6': + length = struct.unpack(">I", _read_except(fp, 4))[0] + else: + raise Exception("logic error, not binary: 0x%02x" % ord(code)) + + return _read_except(fp, length) + + +def _unpack_ext(code, fp, options): + if code == b'\xd4': + length = 1 + elif code == b'\xd5': + length = 2 + elif code == b'\xd6': + length = 4 + elif code == b'\xd7': + length = 8 + elif code == b'\xd8': + length = 16 + elif code == b'\xc7': + length = struct.unpack("B", _read_except(fp, 1))[0] + elif code == b'\xc8': + length = struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xc9': + length = struct.unpack(">I", _read_except(fp, 4))[0] + else: + raise Exception("logic error, not ext: 0x%02x" % ord(code)) + + ext = Ext(ord(_read_except(fp, 1)), _read_except(fp, length)) + + # Unpack with ext handler, if we have one + ext_handlers = options.get("ext_handlers") + if ext_handlers and ext.type in ext_handlers: + ext = ext_handlers[ext.type](ext) + + return ext + + +def _unpack_array(code, fp, options): + if (ord(code) & 0xf0) == 0x90: + length = (ord(code) & ~0xf0) + elif code == b'\xdc': + length = struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xdd': + length = struct.unpack(">I", _read_except(fp, 4))[0] + else: + raise Exception("logic error, not array: 0x%02x" % ord(code)) + + return [_unpack(fp, options) for _ in xrange(length)] + + +def _deep_list_to_tuple(obj): + if isinstance(obj, list): + return tuple([_deep_list_to_tuple(e) for e in obj]) + return obj + + +def _unpack_map(code, fp, options): + if (ord(code) & 0xf0) == 0x80: + length = (ord(code) & ~0xf0) + elif code == b'\xde': + length = struct.unpack(">H", _read_except(fp, 2))[0] + elif code == b'\xdf': + length = struct.unpack(">I", _read_except(fp, 4))[0] + else: + raise Exception("logic error, not map: 0x%02x" % ord(code)) + + d = {} if not options.get('use_ordered_dict') \ + else 
collections.OrderedDict() + for _ in xrange(length): + # Unpack key + k = _unpack(fp, options) + + if isinstance(k, list): + # Attempt to convert list into a hashable tuple + k = _deep_list_to_tuple(k) + elif not isinstance(k, collections.Hashable): + raise UnhashableKeyException( + "encountered unhashable key: %s, %s" % (str(k), str(type(k)))) + elif k in d: + raise DuplicateKeyException( + "encountered duplicate key: %s, %s" % (str(k), str(type(k)))) + + # Unpack value + v = _unpack(fp, options) + + try: + d[k] = v + except TypeError: + raise UnhashableKeyException( + "encountered unhashable key: %s" % str(k)) + return d + + +def _unpack(fp, options): + code = _read_except(fp, 1) + return _unpack_dispatch_table[code](code, fp, options) + +######################################## + + +def _unpack2(fp, **options): + """ + Deserialize MessagePack bytes into a Python object. + + Args: + fp: a .read()-supporting file-like object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext + type to a callable that unpacks an instance of + Ext into an object + use_ordered_dict (bool): unpack maps into OrderedDict, instead of + unordered dict (default False) + allow_invalid_utf8 (bool): unpack invalid strings into instances of + InvalidString, for access to the bytes + (default False) + + Returns: + A Python object. + + Raises: + InsufficientDataException(UnpackException): + Insufficient data to unpack the serialized object. + InvalidStringException(UnpackException): + Invalid UTF-8 string encountered during unpacking. + ReservedCodeException(UnpackException): + Reserved code encountered during unpacking. + UnhashableKeyException(UnpackException): + Unhashable key encountered during map unpacking. + The serialized map cannot be deserialized into a Python dictionary. + DuplicateKeyException(UnpackException): + Duplicate key encountered during map unpacking. 
+ + Example: + >>> f = open('test.bin', 'rb') + >>> umsgpack.unpackb(f) + {u'compact': True, u'schema': 0} + >>> + """ + return _unpack(fp, options) + + +def _unpack3(fp, **options): + """ + Deserialize MessagePack bytes into a Python object. + + Args: + fp: a .read()-supporting file-like object + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext + type to a callable that unpacks an instance of + Ext into an object + use_ordered_dict (bool): unpack maps into OrderedDict, instead of + unordered dict (default False) + allow_invalid_utf8 (bool): unpack invalid strings into instances of + InvalidString, for access to the bytes + (default False) + + Returns: + A Python object. + + Raises: + InsufficientDataException(UnpackException): + Insufficient data to unpack the serialized object. + InvalidStringException(UnpackException): + Invalid UTF-8 string encountered during unpacking. + ReservedCodeException(UnpackException): + Reserved code encountered during unpacking. + UnhashableKeyException(UnpackException): + Unhashable key encountered during map unpacking. + The serialized map cannot be deserialized into a Python dictionary. + DuplicateKeyException(UnpackException): + Duplicate key encountered during map unpacking. + + Example: + >>> f = open('test.bin', 'rb') + >>> umsgpack.unpackb(f) + {'compact': True, 'schema': 0} + >>> + """ + return _unpack(fp, options) + + +# For Python 2, expects a str object +def _unpackb2(s, **options): + """ + Deserialize MessagePack bytes into a Python object. 
+ + Args: + s: a 'str' or 'bytearray' containing serialized MessagePack bytes + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext + type to a callable that unpacks an instance of + Ext into an object + use_ordered_dict (bool): unpack maps into OrderedDict, instead of + unordered dict (default False) + allow_invalid_utf8 (bool): unpack invalid strings into instances of + InvalidString, for access to the bytes + (default False) + + Returns: + A Python object. + + Raises: + TypeError: + Packed data type is neither 'str' nor 'bytearray'. + InsufficientDataException(UnpackException): + Insufficient data to unpack the serialized object. + InvalidStringException(UnpackException): + Invalid UTF-8 string encountered during unpacking. + ReservedCodeException(UnpackException): + Reserved code encountered during unpacking. + UnhashableKeyException(UnpackException): + Unhashable key encountered during map unpacking. + The serialized map cannot be deserialized into a Python dictionary. + DuplicateKeyException(UnpackException): + Duplicate key encountered during map unpacking. + + Example: + >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00') + {u'compact': True, u'schema': 0} + >>> + """ + if not isinstance(s, (str, bytearray)): + raise TypeError("packed data must be type 'str' or 'bytearray'") + return _unpack(io.BytesIO(s), options) + + +# For Python 3, expects a bytes object +def _unpackb3(s, **options): + """ + Deserialize MessagePack bytes into a Python object. 
+ + Args: + s: a 'bytes' or 'bytearray' containing serialized MessagePack bytes + + Kwargs: + ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext + type to a callable that unpacks an instance of + Ext into an object + use_ordered_dict (bool): unpack maps into OrderedDict, instead of + unordered dict (default False) + allow_invalid_utf8 (bool): unpack invalid strings into instances of + InvalidString, for access to the bytes + (default False) + + Returns: + A Python object. + + Raises: + TypeError: + Packed data type is neither 'bytes' nor 'bytearray'. + InsufficientDataException(UnpackException): + Insufficient data to unpack the serialized object. + InvalidStringException(UnpackException): + Invalid UTF-8 string encountered during unpacking. + ReservedCodeException(UnpackException): + Reserved code encountered during unpacking. + UnhashableKeyException(UnpackException): + Unhashable key encountered during map unpacking. + The serialized map cannot be deserialized into a Python dictionary. + DuplicateKeyException(UnpackException): + Duplicate key encountered during map unpacking. 
+ + Example: + >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00') + {'compact': True, 'schema': 0} + >>> + """ + if not isinstance(s, (bytes, bytearray)): + raise TypeError("packed data must be type 'bytes' or 'bytearray'") + return _unpack(io.BytesIO(s), options) + +############################################################################# +# Module Initialization +############################################################################# + + +def __init(): + # pylint: disable=global-variable-undefined + + global pack + global packb + global unpack + global unpackb + global dump + global dumps + global load + global loads + global compatibility + global _float_precision + global _unpack_dispatch_table + global xrange + + # Compatibility mode for handling strings/bytes with the old specification + compatibility = False + + # Auto-detect system float precision + if sys.float_info.mant_dig == 53: + _float_precision = "double" + else: + _float_precision = "single" + + # Map packb and unpackb to the appropriate version + if sys.version_info[0] == 3: + pack = _pack3 + packb = _packb3 + dump = _pack3 + dumps = _packb3 + unpack = _unpack3 + unpackb = _unpackb3 + load = _unpack3 + loads = _unpackb3 + xrange = range # pylint: disable=redefined-builtin + else: + pack = _pack2 + packb = _packb2 + dump = _pack2 + dumps = _packb2 + unpack = _unpack2 + unpackb = _unpackb2 + load = _unpack2 + loads = _unpackb2 + + # Build a dispatch table for fast lookup of unpacking function + + _unpack_dispatch_table = {} + # Fix uint + for code in range(0, 0x7f + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer + # Fix map + for code in range(0x80, 0x8f + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_map + # Fix array + for code in range(0x90, 0x9f + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_array + # Fix str + for code in range(0xa0, 0xbf + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string + # Nil + 
_unpack_dispatch_table[b'\xc0'] = _unpack_nil + # Reserved + _unpack_dispatch_table[b'\xc1'] = _unpack_reserved + # Boolean + _unpack_dispatch_table[b'\xc2'] = _unpack_boolean + _unpack_dispatch_table[b'\xc3'] = _unpack_boolean + # Bin + for code in range(0xc4, 0xc6 + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_binary + # Ext + for code in range(0xc7, 0xc9 + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext + # Float + _unpack_dispatch_table[b'\xca'] = _unpack_float + _unpack_dispatch_table[b'\xcb'] = _unpack_float + # Uint + for code in range(0xcc, 0xcf + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer + # Int + for code in range(0xd0, 0xd3 + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer + # Fixext + for code in range(0xd4, 0xd8 + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext + # String + for code in range(0xd9, 0xdb + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string + # Array + _unpack_dispatch_table[b'\xdc'] = _unpack_array + _unpack_dispatch_table[b'\xdd'] = _unpack_array + # Map + _unpack_dispatch_table[b'\xde'] = _unpack_map + _unpack_dispatch_table[b'\xdf'] = _unpack_map + # Negative fixint + for code in range(0xe0, 0xff + 1): + _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer + + +__init() diff --git a/src/tests/mock/pybitmessage/helper_ackPayload.py b/src/tests/mock/pybitmessage/helper_ackPayload.py new file mode 100644 index 00000000..d30f4c0d --- /dev/null +++ b/src/tests/mock/pybitmessage/helper_ackPayload.py @@ -0,0 +1,51 @@ +""" +This module is for generating ack payload +""" + +from binascii import hexlify +from struct import pack + +import helper_random +import highlevelcrypto +from addresses import encodeVarint + + +def genAckPayload(streamNumber=1, stealthLevel=0): + """ + Generate and return payload obj. 
+ + This function generates payload objects for message acknowledgements + Several stealth levels are available depending on the privacy needs; + a higher level means better stealth, but also higher cost (size+POW) + + - level 0: a random 32-byte sequence with a message header appended + - level 1: a getpubkey request for a (random) dummy key hash + - level 2: a standard message, encrypted to a random pubkey + """ + if stealthLevel == 2: # Generate privacy-enhanced payload + # Generate a dummy privkey and derive the pubkey + dummyPubKeyHex = highlevelcrypto.privToPub( + hexlify(helper_random.randomBytes(32))) + # Generate a dummy message of random length + # (the smallest possible standard-formatted message is 234 bytes) + dummyMessage = helper_random.randomBytes( + helper_random.randomrandrange(234, 801)) + # Encrypt the message using standard BM encryption (ECIES) + ackdata = highlevelcrypto.encrypt(dummyMessage, dummyPubKeyHex) + acktype = 2 # message + version = 1 + + elif stealthLevel == 1: # Basic privacy payload (random getpubkey) + ackdata = helper_random.randomBytes(32) + acktype = 0 # getpubkey + version = 4 + + else: # Minimum viable payload (non stealth) + ackdata = helper_random.randomBytes(32) + acktype = 2 # message + version = 1 + + ackobject = pack('>I', acktype) + encodeVarint( + version) + encodeVarint(streamNumber) + ackdata + + return ackobject diff --git a/src/tests/mock/pybitmessage/helper_addressbook.py b/src/tests/mock/pybitmessage/helper_addressbook.py new file mode 100644 index 00000000..fb572150 --- /dev/null +++ b/src/tests/mock/pybitmessage/helper_addressbook.py @@ -0,0 +1,14 @@ +""" +Insert value into addressbook +""" + +from bmconfigparser import BMConfigParser +from helper_sql import sqlExecute + + +def insert(address, label): + """perform insert into addressbook""" + + if address not in BMConfigParser().addresses(): + return sqlExecute('''INSERT INTO addressbook VALUES (?,?)''', label, address) == 1 + return False diff --git 
"""
Calculates bitcoin and testnet address from pubkey
"""

import hashlib

from debug import logger
from pyelliptic import arithmetic


def _base58AddressFromPubkey(pubkey, versionPrefix):
    """
    Base58Check-encode ``versionPrefix + RIPEMD160(SHA256(pubkey))`` plus a
    4-byte double-SHA256 checksum, emitting one leading '1' per leading zero
    byte (the Base58Check convention).
    """
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe = hashlib.new('ripemd160')
    ripe.update(sha.digest())
    ripeWithPrefix = versionPrefix + ripe.digest()

    checksum = hashlib.sha256(hashlib.sha256(
        ripeWithPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    # Compare a one-byte slice rather than an element: this works for both
    # str (py2) and bytes (py3).  The previous element compare against the
    # text literal '\x00' never matched on py3, where indexing bytes yields
    # an int; the prefix concatenation likewise needed a bytes literal.
    while binaryBitcoinAddress[:1] == b'\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded


def calculateBitcoinAddressFromPubkey(pubkey):
    """Calculate bitcoin address from given pubkey (65 bytes long hex string)"""
    # NOTE(review): despite the docstring, the hashing below suggests the
    # argument is a raw 65-byte uncompressed pubkey, not hex text — confirm.
    if len(pubkey) != 65:
        logger.error('Could not calculate Bitcoin address from pubkey because'
                     ' function was passed a pubkey that was'
                     ' %i bytes long rather than 65.', len(pubkey))
        return "error"
    # Mainnet P2PKH version byte 0x00
    return _base58AddressFromPubkey(pubkey, b'\x00')


def calculateTestnetAddressFromPubkey(pubkey):
    """This function expects that pubkey begin with the testnet prefix"""
    if len(pubkey) != 65:
        logger.error('Could not calculate Bitcoin address from pubkey because'
                     ' function was passed a pubkey that was'
                     ' %i bytes long rather than 65.', len(pubkey))
        return "error"
    # Testnet P2PKH version byte 0x6F
    return _base58AddressFromPubkey(pubkey, b'\x6F')


# --- file: src/tests/mock/pybitmessage/helper_inbox.py ---
"""Helper Inbox performs inbox messages related operations"""

import queues
from helper_sql import sqlExecute, sqlQuery


def insert(t):
    """Perform an insert into the "inbox" table"""
    sqlExecute('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?,?)''', *t)
    # shouldn't emit changedInboxUnread and displayNewInboxMessage
    # at the same time
    # queues.UISignalQueue.put(('changedInboxUnread', None))


def trash(msgid):
    """Mark a message in the `inbox` as `trash`"""
    sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', msgid)
    # Tell the UI to drop the corresponding row.
    queues.UISignalQueue.put(('removeInboxRowByMsgid', msgid))


def undeleteMessage(msgid):
    """Undelete the message (move it back from trash to the inbox folder)"""
    sqlExecute('''UPDATE inbox SET folder='inbox' WHERE msgid=?''', msgid)


def isMessageAlreadyInInbox(sigHash):
    """Check for previous instances of this message"""
    queryReturn = sqlQuery(
        '''SELECT COUNT(*) FROM inbox WHERE sighash=?''', sigHash)
    return queryReturn[0][0] != 0
BITMESSAGE_ENCODING_IGNORE = 0
BITMESSAGE_ENCODING_TRIVIAL = 1
BITMESSAGE_ENCODING_SIMPLE = 2
BITMESSAGE_ENCODING_EXTENDED = 3


class MsgEncodeException(Exception):
    """Exception during message encoding"""
    pass


class MsgDecodeException(Exception):
    """Exception during message decoding"""
    pass


class DecompressionSizeException(MsgDecodeException):
    # pylint: disable=super-init-not-called
    """Decompression resulted in too much data (attack protection)"""
    def __init__(self, size):
        self.size = size


class MsgEncode(object):
    """Message encoder class: dispatches on the requested encoding."""
    def __init__(self, message, encoding=BITMESSAGE_ENCODING_SIMPLE):
        self.data = None
        self.encoding = encoding
        self.length = 0
        if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
            self.encodeExtended(message)
        elif self.encoding == BITMESSAGE_ENCODING_SIMPLE:
            self.encodeSimple(message)
        elif self.encoding == BITMESSAGE_ENCODING_TRIVIAL:
            self.encodeTrivial(message)
        else:
            raise MsgEncodeException("Unknown encoding %i" % (encoding))

    def encodeExtended(self, message):
        """Handle extended encoding: msgpack the message object, then zlib."""
        try:
            msgObj = messagetypes.message.Message()
            self.data = zlib.compress(msgpack.dumps(msgObj.encode(message)), 9)
        except zlib.error:
            logger.error("Error compressing message")
            raise MsgEncodeException("Error compressing message")
        except msgpack.exceptions.PackException:
            logger.error("Error msgpacking message")
            raise MsgEncodeException("Error msgpacking message")
        self.length = len(self.data)

    def encodeSimple(self, message):
        """Handle simple encoding: 'Subject:...\\nBody:...' text layout."""
        self.data = 'Subject:%(subject)s\nBody:%(body)s' % message
        self.length = len(self.data)

    def encodeTrivial(self, message):
        """Handle trivial encoding: the raw body, nothing else."""
        self.data = message['body']
        self.length = len(self.data)


class MsgDecode(object):
    """Message decoder class: fills in .subject and .body."""
    def __init__(self, encoding, data):
        self.encoding = encoding
        if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
            self.decodeExtended(data)
        elif self.encoding in (
                BITMESSAGE_ENCODING_SIMPLE, BITMESSAGE_ENCODING_TRIVIAL):
            self.decodeSimple(data)
        else:
            # Unknown encodings yield a placeholder message instead of failing
            self.body = _translate(
                "MsgDecode",
                "The message has an unknown encoding.\n"
                "Perhaps you should upgrade Bitmessage.")
            self.subject = _translate("MsgDecode", "Unknown encoding")

    def decodeExtended(self, data):
        """Handle extended encoding: bounded zlib inflate, then msgunpack."""
        dc = zlib.decompressobj()
        tmp = ""
        # Decompress incrementally so a zip-bomb cannot exceed the
        # configured maximum size (DecompressionSizeException otherwise).
        while len(tmp) <= BMConfigParser().safeGetInt("zlib", "maxsize"):
            try:
                got = dc.decompress(
                    data, BMConfigParser().safeGetInt("zlib", "maxsize")
                    + 1 - len(tmp))
                # EOF
                if got == "":
                    break
                tmp += got
                data = dc.unconsumed_tail
            except zlib.error:
                logger.error("Error decompressing message")
                raise MsgDecodeException("Error decompressing message")
        else:
            raise DecompressionSizeException(len(tmp))

        try:
            tmp = msgpack.loads(tmp)
        except (msgpack.exceptions.UnpackException,
                msgpack.exceptions.ExtraData):
            logger.error("Error msgunpacking message")
            raise MsgDecodeException("Error msgunpacking message")

        try:
            msgType = tmp[""]
        except KeyError:
            logger.error("Message type missing")
            raise MsgDecodeException("Message type missing")

        msgObj = messagetypes.constructObject(tmp)
        if msgObj is None:
            raise MsgDecodeException("Malformed message")
        try:
            msgObj.process()
        except:  # noqa:E722
            raise MsgDecodeException("Malformed message")
        if msgType == "message":
            self.subject = msgObj.subject
            self.body = msgObj.body

    def decodeSimple(self, data):
        """Handle simple encoding: split 'Subject:...\\nBody:...' text."""
        # str.find replaces the Python-2-only module function string.find,
        # which no longer exists in Python 3's string module.
        bodyPositionIndex = data.find('\nBody:')
        if bodyPositionIndex > 1:
            subject = data[8:bodyPositionIndex]
            # Only save and show the first 500 characters of the subject.
            # Any more is probably an attack.
            subject = subject[:500]
            body = data[bodyPositionIndex + 6:]
        else:
            subject = ''
            body = data
        # Throw away any extra lines (headers) after the subject.
        if subject:
            subject = subject.splitlines()[0]
        self.subject = subject
        self.body = body


# --- file: src/tests/mock/pybitmessage/helper_random.py (first part) ---
"""Convenience functions for random operations. Not suitable for security / cryptography operations."""

NoneType = type(None)


def seed():
    """Initialize random number generator"""
    random.seed()


def randomBytes(n):
    """Return *n* random bytes, preferring the OS CSPRNG."""
    try:
        return os.urandom(n)
    except NotImplementedError:
        # Fall back to OpenSSL's RNG on platforms without os.urandom
        return OpenSSL.rand(n)


def randomshuffle(population):
    """Method randomShuffle.

    shuffle the sequence x in place.
    shuffles the elements in list in place,
    so they are in a random order.
    As Shuffle will alter data in-place,
    so its input must be a mutable sequence.
    In contrast, sample produces a new list
    and its input can be much more varied
    (tuple, string, xrange, bytearray, set, etc)
    """
    random.shuffle(population)


def randomsample(population, k):
    """Method randomSample.

    return a k length list of unique elements
    chosen from the population sequence.
    Used for random sampling
    without replacement, its called
    partial shuffle.
    """
    return random.sample(population, k)
+ """ + if isinstance(y, NoneType): + return random.randrange(x) # nosec + return random.randrange(x, y) # nosec + + +def randomchoice(population): + """Method randomchoice. + + Return a random element from the non-empty + sequence seq. If seq is empty, raises + IndexError. + """ + return random.choice(population) # nosec diff --git a/src/tests/mock/pybitmessage/helper_search.py b/src/tests/mock/pybitmessage/helper_search.py new file mode 100644 index 00000000..9fcb88b5 --- /dev/null +++ b/src/tests/mock/pybitmessage/helper_search.py @@ -0,0 +1,113 @@ +""" +Additional SQL helper for searching messages. +Used by :mod:`.bitmessageqt`. +""" + +from helper_sql import sqlQuery +from tr import _translate + + +def search_sql( + xAddress='toaddress', account=None, folder='inbox', where=None, + what=None, unreadOnly=False +): + """ + Search for messages from given account and folder having search term + in one of it's fields. + + :param str xAddress: address field checked + ('fromaddress', 'toaddress' or 'both') + :param account: the account which is checked + :type account: :class:`.bitmessageqt.account.BMAccount` + instance + :param str folder: the folder which is checked + :param str where: message field which is checked ('toaddress', + 'fromaddress', 'subject' or 'message'), by default check any field + :param str what: the search term + :param bool unreadOnly: if True, search only for unread messages + :return: all messages where field contains + :rtype: list[list] + """ + # pylint: disable=too-many-arguments, too-many-branches + if what: + what = '%' + what + '%' + if where == _translate("MainWindow", "To"): + where = 'toaddress' + elif where == _translate("MainWindow", "From"): + where = 'fromaddress' + elif where == _translate("MainWindow", "Subject"): + where = 'subject' + elif where == _translate("MainWindow", "Message"): + where = 'message' + else: + where = 'toaddress || fromaddress || subject || message' + + sqlStatementBase = 'SELECT toaddress, fromaddress, 
subject, ' + ( + 'status, ackdata, lastactiontime FROM sent ' if folder == 'sent' + else 'folder, msgid, received, read FROM inbox ' + ) + + sqlStatementParts = [] + sqlArguments = [] + if account is not None: + if xAddress == 'both': + sqlStatementParts.append('(fromaddress = ? OR toaddress = ?)') + sqlArguments.append(account) + sqlArguments.append(account) + else: + sqlStatementParts.append(xAddress + ' = ? ') + sqlArguments.append(account) + if folder is not None: + if folder == 'new': + folder = 'inbox' + unreadOnly = True + sqlStatementParts.append('folder = ? ') + sqlArguments.append(folder) + else: + sqlStatementParts.append('folder != ?') + sqlArguments.append('trash') + if what: + sqlStatementParts.append('%s LIKE ?' % (where)) + sqlArguments.append(what) + if unreadOnly: + sqlStatementParts.append('read = 0') + if sqlStatementParts: + sqlStatementBase += 'WHERE ' + ' AND '.join(sqlStatementParts) + if folder == 'sent': + sqlStatementBase += ' ORDER BY lastactiontime' + return sqlQuery(sqlStatementBase, sqlArguments) + + +def check_match( + toAddress, fromAddress, subject, message, where=None, what=None): + """ + Check if a single message matches a filter (used when new messages + are added to messagelists) + """ + # pylint: disable=too-many-arguments + if not what: + return True + + if where in ( + _translate("MainWindow", "To"), _translate("MainWindow", "All") + ): + if what.lower() not in toAddress.lower(): + return False + elif where in ( + _translate("MainWindow", "From"), _translate("MainWindow", "All") + ): + if what.lower() not in fromAddress.lower(): + return False + elif where in ( + _translate("MainWindow", "Subject"), + _translate("MainWindow", "All") + ): + if what.lower() not in subject.lower(): + return False + elif where in ( + _translate("MainWindow", "Message"), + _translate("MainWindow", "All") + ): + if what.lower() not in message.lower(): + return False + return True diff --git a/src/tests/mock/pybitmessage/helper_sent.py 
b/src/tests/mock/pybitmessage/helper_sent.py new file mode 100644 index 00000000..d83afce6 --- /dev/null +++ b/src/tests/mock/pybitmessage/helper_sent.py @@ -0,0 +1,48 @@ +""" +Insert values into sent table +""" + +import time +import uuid +from addresses import decodeAddress +from bmconfigparser import BMConfigParser +from helper_ackPayload import genAckPayload +from helper_sql import sqlExecute + + +# pylint: disable=too-many-arguments +def insert(msgid=None, toAddress='[Broadcast subscribers]', fromAddress=None, subject=None, + message=None, status='msgqueued', ripe=None, ackdata=None, sentTime=None, + lastActionTime=None, sleeptill=0, retryNumber=0, encoding=2, ttl=None, folder='sent'): + """Perform an insert into the `sent` table""" + # pylint: disable=unused-variable + # pylint: disable-msg=too-many-locals + + valid_addr = True + if not ripe or not ackdata: + addr = fromAddress if toAddress == '[Broadcast subscribers]' else toAddress + new_status, addressVersionNumber, streamNumber, new_ripe = decodeAddress(addr) + valid_addr = True if new_status == 'success' else False + if not ripe: + ripe = new_ripe + + if not ackdata: + stealthLevel = BMConfigParser().safeGetInt( + 'bitmessagesettings', 'ackstealthlevel') + new_ackdata = genAckPayload(streamNumber, stealthLevel) + ackdata = new_ackdata + if valid_addr: + msgid = msgid if msgid else uuid.uuid4().bytes + sentTime = sentTime if sentTime else int(time.time()) # sentTime (this doesn't change) + lastActionTime = lastActionTime if lastActionTime else int(time.time()) + + ttl = ttl if ttl else BMConfigParser().getint('bitmessagesettings', 'ttl') + + t = (msgid, toAddress, ripe, fromAddress, subject, message, ackdata, + sentTime, lastActionTime, sleeptill, status, retryNumber, folder, + encoding, ttl) + + sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t) + return ackdata + else: + return None diff --git a/src/tests/mock/pybitmessage/helper_sql.py 
"""
SQL-related functions defined here really pass the queries (or other SQL
commands) to :class:`.threads.sqlThread` through the `sqlSubmitQueue` queue
and check or return the result got from `sqlReturnQueue`.

This is done that way because :mod:`sqlite3` is so thread-unsafe that they
won't even let you call it from different threads using your own locks.
SQLite objects can only be used from one thread.

.. note:: This actually only applies for certain deployments, and/or
    really old version of sqlite. I haven't actually seen it anywhere.
    Current versions do have support for threading and multiprocessing.
    I don't see an urgent reason to refactor this, but it should be noted
    in the comment that the problem is mostly not valid. Sadly, last time
    I checked, there is no reliable way to check whether the library is
    or isn't thread-safe.
"""

import threading

try:
    from six.moves import queue
except ImportError:  # six.moves.queue is the stdlib queue module on Python 3
    import queue


sqlSubmitQueue = queue.Queue()
"""the queue for SQL"""
sqlReturnQueue = queue.Queue()
"""the queue for results"""
sql_lock = threading.Lock()
""" lock to prevent queueing a new request until the previous response
    is available """
sql_available = False
"""set to True by `.threads.sqlThread` immediately upon start"""
sql_ready = threading.Event()
"""set by `.threads.sqlThread` when ready for processing (after
    initialization is done)"""


def sqlQuery(sql_statement, *args):
    """
    Query sqlite and return results

    :param str sql_statement: SQL statement string
    :param list args: SQL query parameters
    :rtype: list
    """
    assert sql_available
    with sql_lock:
        sqlSubmitQueue.put(sql_statement)
        if not args:
            sqlSubmitQueue.put('')
        elif isinstance(args[0], (list, tuple)):
            # a single iterable argument is forwarded as the parameter list
            sqlSubmitQueue.put(args[0])
        else:
            sqlSubmitQueue.put(args)
        rows, _ = sqlReturnQueue.get()
    return rows


def sqlExecuteChunked(sql_statement, idCount, *args):
    """Execute chunked SQL statement to avoid argument limit"""
    # SQLITE_MAX_VARIABLE_NUMBER,
    # unfortunately getting/setting isn't exposed to python
    assert sql_available
    sqlExecuteChunked.chunkSize = 999

    if idCount == 0 or idCount > len(args):
        return 0

    total_row_count = 0
    with sql_lock:
        # The leading (static) args ride along with every chunk; only the
        # trailing idCount arguments are split across chunks.
        for i in range(
                len(args) - idCount, len(args),
                sqlExecuteChunked.chunkSize - (len(args) - idCount)
        ):
            chunk_slice = args[
                i:i + sqlExecuteChunked.chunkSize - (len(args) - idCount)
            ]
            sqlSubmitQueue.put(
                sql_statement.format(','.join('?' * len(chunk_slice)))
            )
            # first static args, and then iterative chunk
            sqlSubmitQueue.put(
                args[0:len(args) - idCount] + chunk_slice
            )
            result = sqlReturnQueue.get()
            total_row_count += result[1]
        sqlSubmitQueue.put('commit')
    return total_row_count


def sqlExecute(sql_statement, *args):
    """Execute SQL statement (optionally with arguments), return rowcount"""
    assert sql_available
    with sql_lock:
        sqlSubmitQueue.put(sql_statement)
        sqlSubmitQueue.put(args if args else '')
        _, rowcount = sqlReturnQueue.get()
        sqlSubmitQueue.put('commit')
    return rowcount


def sqlExecuteScript(sql_statement):
    """Execute a ';'-separated SQL script, one statement at a time"""
    with SqlBulkExecute() as sql:
        for stmt in sql_statement.split(";"):
            sql.execute(stmt)


def sqlStoredProcedure(procName):
    """Schedule procName to be run"""
    assert sql_available
    with sql_lock:
        sqlSubmitQueue.put(procName)
        if procName == "exit":
            sqlSubmitQueue.task_done()
            sqlSubmitQueue.put("terminate")


class SqlBulkExecute(object):
    """This is used when you have to execute the same statement in a cycle."""

    def __enter__(self):
        sql_lock.acquire()
        return self

    def __exit__(self, exc_type, value, traceback):
        # one commit for the whole batch
        sqlSubmitQueue.put('commit')
        sql_lock.release()

    @staticmethod
    def execute(sql_statement, *args):
        """Used for statements that do not return results."""
        assert sql_available
        sqlSubmitQueue.put(sql_statement)
        sqlSubmitQueue.put(args if args else '')
        sqlReturnQueue.get()
+""" +# pylint: disable=too-many-branches,too-many-statements + +import ctypes +import logging +import os +import platform +import socket +import sys +import time +from distutils.version import StrictVersion +from struct import pack + +try: + import defaults + import helper_random + import paths + import state + from bmconfigparser import BMConfigParser +except ImportError: + from . import defaults, helper_random, paths, state + from .bmconfigparser import BMConfigParser + +try: + from plugins.plugin import get_plugin +except ImportError: + get_plugin = None + + +logger = logging.getLogger('default') + +# The user may de-select Portable Mode in the settings if they want +# the config files to stay in the application data folder. +StoreConfigFilesInSameDirectoryAsProgramByDefault = False + + +def loadConfig(): + """Load the config""" + config = BMConfigParser() + if state.appdata: + config.read(state.appdata + 'keys.dat') + # state.appdata must have been specified as a startup option. + needToCreateKeysFile = config.safeGet( + 'bitmessagesettings', 'settingsversion') is None + if not needToCreateKeysFile: + logger.info( + 'Loading config files from directory specified' + ' on startup: %s', state.appdata) + else: + config.read(paths.lookupExeFolder() + 'keys.dat') + + if config.safeGet('bitmessagesettings', 'settingsversion'): + logger.info('Loading config files from same directory as program.') + needToCreateKeysFile = False + state.appdata = paths.lookupExeFolder() + else: + # Could not load the keys.dat file in the program directory. + # Perhaps it is in the appdata directory. 
+ state.appdata = paths.lookupAppdataFolder() + config.read(state.appdata + 'keys.dat') + needToCreateKeysFile = config.safeGet( + 'bitmessagesettings', 'settingsversion') is None + if not needToCreateKeysFile: + logger.info( + 'Loading existing config files from %s', state.appdata) + + if needToCreateKeysFile: + + # This appears to be the first time running the program; there is + # no config file (or it cannot be accessed). Create config file. + config.add_section('bitmessagesettings') + config.set('bitmessagesettings', 'settingsversion', '10') + config.set('bitmessagesettings', 'port', '8444') + config.set('bitmessagesettings', 'timeformat', '%%c') + config.set('bitmessagesettings', 'blackwhitelist', 'black') + config.set('bitmessagesettings', 'startonlogon', 'false') + if 'linux' in sys.platform: + config.set('bitmessagesettings', 'minimizetotray', 'false') + # This isn't implimented yet and when True on + # Ubuntu causes Bitmessage to disappear while + # running when minimized. + else: + config.set('bitmessagesettings', 'minimizetotray', 'true') + config.set('bitmessagesettings', 'showtraynotifications', 'true') + config.set('bitmessagesettings', 'startintray', 'false') + config.set('bitmessagesettings', 'socksproxytype', 'none') + config.set('bitmessagesettings', 'sockshostname', 'localhost') + config.set('bitmessagesettings', 'socksport', '9050') + config.set('bitmessagesettings', 'socksauthentication', 'false') + config.set('bitmessagesettings', 'socksusername', '') + config.set('bitmessagesettings', 'sockspassword', '') + config.set('bitmessagesettings', 'keysencrypted', 'false') + config.set('bitmessagesettings', 'messagesencrypted', 'false') + config.set( + 'bitmessagesettings', 'defaultnoncetrialsperbyte', + str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte)) + config.set( + 'bitmessagesettings', 'defaultpayloadlengthextrabytes', + str(defaults.networkDefaultPayloadLengthExtraBytes)) + config.set('bitmessagesettings', 'minimizeonclose', 'false') 
def updateConfig():
    """Migrate the loaded config through settingsversion upgrades and save it"""
    config = BMConfigParser()
    settingsversion = config.getint('bitmessagesettings', 'settingsversion')
    # v1 -> v2: introduce SOCKS proxy and encryption-related defaults.
    if settingsversion == 1:
        config.set('bitmessagesettings', 'socksproxytype', 'none')
        config.set('bitmessagesettings', 'sockshostname', 'localhost')
        config.set('bitmessagesettings', 'socksport', '9050')
        config.set('bitmessagesettings', 'socksauthentication', 'false')
        config.set('bitmessagesettings', 'socksusername', '')
        config.set('bitmessagesettings', 'sockspassword', '')
        config.set('bitmessagesettings', 'sockslisten', 'false')
        config.set('bitmessagesettings', 'keysencrypted', 'false')
        config.set('bitmessagesettings', 'messagesencrypted', 'false')
        settingsversion = 2
    # let class_sqlThread update SQL and continue
    elif settingsversion == 4:
        # v4 -> v5: record the network default PoW requirements.
        config.set(
            'bitmessagesettings', 'defaultnoncetrialsperbyte',
            str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte))
        config.set(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes',
            str(defaults.networkDefaultPayloadLengthExtraBytes))
        settingsversion = 5

    if settingsversion == 5:
        # v5 -> v7: 0 means "no limit" for acceptable difficulty.
        config.set(
            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')
        config.set(
            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', '0')
        settingsversion = 7

    # Fill in options that may be missing regardless of version.
    if not config.has_option('bitmessagesettings', 'sockslisten'):
        config.set('bitmessagesettings', 'sockslisten', 'false')

    if not config.has_option('bitmessagesettings', 'userlocale'):
        config.set('bitmessagesettings', 'userlocale', 'system')

    if not config.has_option('bitmessagesettings', 'sendoutgoingconnections'):
        config.set('bitmessagesettings', 'sendoutgoingconnections', 'True')

    if not config.has_option('bitmessagesettings', 'useidenticons'):
        config.set('bitmessagesettings', 'useidenticons', 'True')
    if not config.has_option('bitmessagesettings', 'identiconsuffix'):
        # acts as a salt
        config.set(
            'bitmessagesettings', 'identiconsuffix', ''.join(
                helper_random.randomchoice(
                    "123456789ABCDEFGHJKLMNPQRSTUVWXYZ"
                    "abcdefghijkmnopqrstuvwxyz") for x in range(12))
        )  # a twelve character pseudo-password to salt the identicons

    # Add settings to support no longer resending messages after
    # a certain period of time even if we never get an ack
    if settingsversion == 7:
        config.set('bitmessagesettings', 'stopresendingafterxdays', '')
        config.set('bitmessagesettings', 'stopresendingafterxmonths', '')
        settingsversion = 8

    # With the change to protocol version 3, reset the user-settable
    # difficulties to 1
    if settingsversion == 8:
        config.set(
            'bitmessagesettings', 'defaultnoncetrialsperbyte',
            str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte))
        config.set(
            'bitmessagesettings', 'defaultpayloadlengthextrabytes',
            str(defaults.networkDefaultPayloadLengthExtraBytes))
        # Rescale stored acceptable-difficulty values to protocol v3 units.
        previousTotalDifficulty = int(
            config.getint(
                'bitmessagesettings', 'maxacceptablenoncetrialsperbyte')
        ) / 320
        previousSmallMessageDifficulty = int(
            config.getint(
                'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes')
        ) / 14000
        config.set(
            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte',
            str(previousTotalDifficulty * 1000))
        config.set(
            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes',
            str(previousSmallMessageDifficulty * 1000))
        settingsversion = 9

    # Adjust the required POW values for each of this user's addresses
    # to conform to protocol v3 norms.
    if settingsversion == 9:
        for addressInKeysFile in config.addresses():
            try:
                previousTotalDifficulty = float(
                    config.getint(
                        addressInKeysFile, 'noncetrialsperbyte')) / 320
                previousSmallMessageDifficulty = float(
                    config.getint(
                        addressInKeysFile, 'payloadlengthextrabytes')) / 14000
                if previousTotalDifficulty <= 2:
                    previousTotalDifficulty = 1
                if previousSmallMessageDifficulty < 1:
                    previousSmallMessageDifficulty = 1
                config.set(
                    addressInKeysFile, 'noncetrialsperbyte',
                    str(int(previousTotalDifficulty * 1000)))
                config.set(
                    addressInKeysFile, 'payloadlengthextrabytes',
                    str(int(previousSmallMessageDifficulty * 1000)))
            except Exception:
                # Skip malformed address sections rather than abort migration.
                continue
        config.set('bitmessagesettings', 'maxdownloadrate', '0')
        config.set('bitmessagesettings', 'maxuploadrate', '0')
        settingsversion = 10

    # sanity check
    if config.safeGetInt(
            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte') == 0:
        config.set(
            'bitmessagesettings', 'maxacceptablenoncetrialsperbyte',
            str(defaults.ridiculousDifficulty
                * defaults.networkDefaultProofOfWorkNonceTrialsPerByte)
        )
    if config.safeGetInt(
            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') == 0:
        config.set(
            'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes',
            str(defaults.ridiculousDifficulty
                * defaults.networkDefaultPayloadLengthExtraBytes)
        )

    # Onion (hidden service) and tray-notification related defaults.
    if not config.has_option('bitmessagesettings', 'onionhostname'):
        config.set('bitmessagesettings', 'onionhostname', '')
    if not config.has_option('bitmessagesettings', 'onionport'):
        config.set('bitmessagesettings', 'onionport', '8444')
    if not config.has_option('bitmessagesettings', 'onionbindip'):
        config.set('bitmessagesettings', 'onionbindip', '127.0.0.1')
    if not config.has_option('bitmessagesettings', 'smtpdeliver'):
        config.set('bitmessagesettings', 'smtpdeliver', '')
    if not config.has_option(
            'bitmessagesettings', 'hidetrayconnectionnotifications'):
        config.set(
            'bitmessagesettings', 'hidetrayconnectionnotifications', 'false')
    if config.safeGetInt('bitmessagesettings', 'maxoutboundconnections') < 1:
        config.set('bitmessagesettings', 'maxoutboundconnections', '8')
        logger.warning('Your maximum outbound connections must be a number.')

    # TTL is now user-specifiable. Let's add an option to save
    # whatever the user selects.
    if not config.has_option('bitmessagesettings', 'ttl'):
        config.set('bitmessagesettings', 'ttl', '367200')

    config.set('bitmessagesettings', 'settingsversion', str(settingsversion))
    config.save()
def adjustHalfOpenConnectionsLimit():
    """Check and satisfy half-open connections limit (mainly XP and Vista)"""
    # Behind a proxy, stay very conservative.
    proxy_type = BMConfigParser().safeGet(
        'bitmessagesettings', 'socksproxytype', 'none')
    if proxy_type != 'none':
        state.maximumNumberOfHalfOpenConnections = 4
        return

    # Windows XP (5.1.2600) through Vista (6.0.6000) can only have 10
    # outgoing connections at a time, so stay below that limit there.
    limited = False
    if sys.platform.startswith("win"):
        try:
            this_version = StrictVersion(platform.version())
            limited = (
                StrictVersion("5.1.2600")
                <= this_version
                <= StrictVersion("6.0.6000")
            )
        except ValueError:
            # Unparseable version string: assume not limited.
            pass

    state.maximumNumberOfHalfOpenConnections = 9 if limited else 64
def fixSocket():
    """Add missing socket options and methods mainly on Windows"""
    if sys.platform.startswith('linux'):
        # SO_BINDTODEVICE is not always exposed by the socket module;
        # 25 is its value on Linux — TODO confirm for the targeted Pythons.
        socket.SO_BINDTODEVICE = 25

    # Everything below is Windows-only shimming.
    if not sys.platform.startswith('win'):
        return

    # Python 2 on Windows doesn't define a wrapper for
    # socket.inet_ntop but we can make one ourselves using ctypes
    if not hasattr(socket, 'inet_ntop'):
        addressToString = ctypes.windll.ws2_32.WSAAddressToStringA

        def inet_ntop(family, host):
            """Converting an IP address in packed
            binary format to string format"""
            if family == socket.AF_INET:
                if len(host) != 4:
                    raise ValueError("invalid IPv4 host")
                # Pack a sockaddr_in structure for WSAAddressToStringA.
                host = pack("hH4s8s", socket.AF_INET, 0, host, "\0" * 8)
            elif family == socket.AF_INET6:
                if len(host) != 16:
                    raise ValueError("invalid IPv6 host")
                # Pack a sockaddr_in6 structure.
                host = pack("hHL16sL", socket.AF_INET6, 0, 0, host, 0)
            else:
                raise ValueError("invalid address family")
            buf = "\0" * 64
            lengthBuf = pack("I", len(buf))
            addressToString(host, len(host), None, buf, lengthBuf)
            # The output buffer is NUL-terminated.
            return buf[0:buf.index("\0")]
        socket.inet_ntop = inet_ntop

    # Same for inet_pton
    if not hasattr(socket, 'inet_pton'):
        stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA

        def inet_pton(family, host):
            """Converting an IP address in string format
            to a packed binary format"""
            buf = "\0" * 28
            lengthBuf = pack("I", len(buf))
            # Non-zero return means WSAStringToAddressA failed.
            if stringToAddress(str(host),
                               int(family),
                               None,
                               buf,
                               lengthBuf) != 0:
                raise socket.error("illegal IP address passed to inet_pton")
            # Offsets skip the sockaddr header to the raw address bytes.
            if family == socket.AF_INET:
                return buf[4:8]
            elif family == socket.AF_INET6:
                return buf[8:24]
            else:
                raise ValueError("invalid address family")
        socket.inet_pton = inet_pton

    # These sockopts are needed on for IPv6 support
    if not hasattr(socket, 'IPPROTO_IPV6'):
        socket.IPPROTO_IPV6 = 41
    if not hasattr(socket, 'IPV6_V6ONLY'):
        socket.IPV6_V6ONLY = 27
def start_proxyconfig():
    """Check socksproxytype and start any proxy configuration plugin"""
    if not get_plugin:
        return
    config = BMConfigParser()
    proxy_type = config.safeGet('bitmessagesettings', 'socksproxytype')
    # Built-in proxy types need no plugin; only custom names are handled here.
    if not proxy_type or proxy_type in ('none', 'SOCKS4a', 'SOCKS5'):
        return
    try:
        started_at = time.time()
        # A plugin returning a falsy value is treated the same as a
        # missing/broken plugin (TypeError from get_plugin).
        if not get_plugin('proxyconfig', name=proxy_type)(config):
            raise TypeError()
    except TypeError:
        # cannot import shutdown here ):
        logger.error(
            'Failed to run proxy config plugin %s',
            proxy_type, exc_info=True)
        config.setTemp('bitmessagesettings', 'dontconnect', 'true')
    else:
        logger.info(
            'Started proxy config plugin %s in %s sec',
            proxy_type, time.time() - started_at)
def makeCryptor(privkey):
    """Return a private `.pyelliptic.ECC` instance"""
    # Hex private key -> 32-byte binary, then derive the public point.
    private_key = a.changebase(privkey, 16, 256, minlen=32)
    public_key = pointMult(private_key)
    # Key blobs in pyelliptic's serialization (curve/length prefixed) —
    # prefix bytes match what pyelliptic.ECC expects; confirm against its docs.
    privkey_bin = b'\x02\xca\x00\x20' + private_key
    pubkey_bin = (
        b'\x02\xca\x00\x20' + public_key[1:-32] + b'\x00\x20' + public_key[-32:]
    )
    cryptor = pyelliptic.ECC(
        curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin)
    return cryptor


def hexToPubkey(pubkey):
    """Convert a pubkey from hex to binary"""
    # Skip the two leading hex chars, then split the remaining 64 bytes
    # into two length-prefixed 32-byte halves.
    pubkey_raw = a.changebase(pubkey[2:], 16, 256, minlen=64)
    pubkey_bin = b'\x02\xca\x00 ' + pubkey_raw[:32] + b'\x00 ' + pubkey_raw[32:]
    return pubkey_bin


def makePubCryptor(pubkey):
    """Return a public `.pyelliptic.ECC` instance"""
    pubkey_bin = hexToPubkey(pubkey)
    return pyelliptic.ECC(curve='secp256k1', pubkey=pubkey_bin)


def privToPub(privkey):
    """Converts hex private key into hex public key"""
    private_key = a.changebase(privkey, 16, 256, minlen=32)
    public_key = pointMult(private_key)
    return hexlify(public_key)


def encrypt(msg, hexPubkey):
    """Encrypts message with hex public key"""
    # An ephemeral ECC instance is enough: encryption needs no private key.
    return pyelliptic.ECC(curve='secp256k1').encrypt(
        msg, hexToPubkey(hexPubkey))


def decrypt(msg, hexPrivkey):
    """Decrypts message with hex private key"""
    return makeCryptor(hexPrivkey).decrypt(msg)


def decryptFast(msg, cryptor):
    """Decrypts message with an existing `.pyelliptic.ECC` object"""
    return cryptor.decrypt(msg)


def sign(msg, hexPrivkey):
    """
    Signs with hex private key using SHA1 or SHA256 depending on
    "digestalg" setting
    """
    digestAlg = BMConfigParser().safeGet(
        'bitmessagesettings', 'digestalg', 'sha256')
    if digestAlg == "sha1":
        # SHA1, this will eventually be deprecated
        return makeCryptor(hexPrivkey).sign(
            msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
    elif digestAlg == "sha256":
        # SHA256. Eventually this will become the default
        return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.EVP_sha256)
    else:
        raise ValueError("Unknown digest algorithm %s" % digestAlg)
def verify(msg, sig, hexPubkey):
    """
    Verify *sig* over *msg* with hex public key *hexPubkey*, accepting
    either SHA1 or SHA256 digests (transitional, see module docstring).
    Returns True on success, False otherwise; never raises.
    """
    # As mentioned above, we must upgrade gracefully to use SHA256. So
    # let us check the signature using both SHA1 and SHA256 and if one
    # of them passes then we will be satisfied. Eventually this can
    # be simplified and we'll only check with SHA256.
    try:
        # old SHA1 algorithm.
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception (pyelliptic raises
        # ordinary exceptions on malformed keys/signatures).
        sigVerifyPassed = makePubCryptor(hexPubkey).verify(
            sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1)
    except Exception:
        sigVerifyPassed = False
    if sigVerifyPassed:
        # The signature check passed using SHA1
        return True
    # The signature check using SHA1 failed. Let us try it with SHA256.
    try:
        return makePubCryptor(hexPubkey).verify(
            sig, msg, digest_alg=OpenSSL.EVP_sha256)
    except Exception:
        return False
def pointMult(secret):
    """
    Does an EC point multiplication; turns a private key into a public key.

    Evidently, this type of error can occur very rarely:

    >>> File "highlevelcrypto.py", line 54, in pointMult
    >>> group = OpenSSL.EC_KEY_get0_group(k)
    >>> WindowsError: exception: access violation reading 0x0000000000000008
    """
    # NOTE(review): this retries forever on *any* exception; a persistent
    # OpenSSL failure would spin with a 0.2 s sleep — confirm intended.
    while True:
        try:
            # Fresh secp256k1 key object; load the 32-byte secret scalar.
            k = OpenSSL.EC_KEY_new_by_curve_name(
                OpenSSL.get_curve('secp256k1'))
            priv_key = OpenSSL.BN_bin2bn(secret, 32, None)
            group = OpenSSL.EC_KEY_get0_group(k)
            pub_key = OpenSSL.EC_POINT_new(group)

            # pub_key = priv_key * generator (EC_POINT_mul with NULL point).
            OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
            OpenSSL.EC_KEY_set_private_key(k, priv_key)
            OpenSSL.EC_KEY_set_public_key(k, pub_key)

            # Serialize the public key: first call sizes the buffer,
            # second fills it.
            size = OpenSSL.i2o_ECPublicKey(k, None)
            mb = OpenSSL.create_string_buffer(size)
            OpenSSL.i2o_ECPublicKey(k, OpenSSL.byref(OpenSSL.pointer(mb)))

            # Free the OpenSSL objects before returning the raw bytes.
            OpenSSL.EC_POINT_free(pub_key)
            OpenSSL.BN_free(priv_key)
            OpenSSL.EC_KEY_free(k)
            return mb.raw

        except Exception:
            import traceback
            import time
            traceback.print_exc()
            time.sleep(0.2)
diff --git a/src/tests/mock/pybitmessage/l10n.py b/src/tests/mock/pybitmessage/l10n.py new file mode 100644 index 00000000..3b16f0b6 --- /dev/null +++ b/src/tests/mock/pybitmessage/l10n.py @@ -0,0 +1,152 @@ +"""Localization helpers""" + +import logging +import os +import re +import sys +import time + +from six.moves import range + +from bmconfigparser import BMConfigParser + +logger = logging.getLogger('default') + +DEFAULT_ENCODING = 'ISO8859-1' +DEFAULT_LANGUAGE = 'en_US' +DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' + +try: + import locale + encoding = locale.getpreferredencoding(True) or DEFAULT_ENCODING + language = ( + locale.getlocale()[0] or locale.getdefaultlocale()[0] + or DEFAULT_LANGUAGE) +except (ImportError, AttributeError): # FIXME: it never happens + logger.exception('Could not determine language or encoding') + locale = None + encoding = DEFAULT_ENCODING + language = DEFAULT_LANGUAGE + + +windowsLanguageMap = { + "ar": "arabic", + "cs": "czech", + "da": "danish", + "de": "german", + "en": "english", + "eo": "esperanto", + "fr": "french", + "it": "italian", + "ja": "japanese", + "nl": "dutch", + "no": "norwegian", + "pl": "polish", + "pt": "portuguese", + "ru": "russian", + "sk": "slovak", + "zh": "chinese", + "zh_CN": "chinese-simplified", + "zh_HK": "chinese-traditional", + "zh_SG": "chinese-simplified", + "zh_TW": "chinese-traditional" +} + + +time_format = BMConfigParser().safeGet( + 'bitmessagesettings', 'timeformat', DEFAULT_TIME_FORMAT) + +if not re.search(r'\d', time.strftime(time_format)): + time_format = DEFAULT_TIME_FORMAT + +# It seems some systems lie about the encoding they use +# so we perform comprehensive decoding tests +elif sys.version_info[0] == 2: + try: + # Check day names + for i in range(7): + time.strftime( + time_format, (0, 0, 0, 0, 0, 0, i, 0, 0)).decode(encoding) + # Check month names + for i in range(1, 13): + time.strftime( + time_format, (0, i, 0, 0, 0, 0, 0, 0, 0)).decode(encoding) + # Check AM/PM + time.strftime( + 
def setlocale(newlocale):
    """Set the locale"""
    try:
        locale.setlocale(locale.LC_ALL, newlocale)
    except AttributeError:  # locale is None
        pass
    # NOTE(review): an unsupported locale name raises locale.Error here,
    # which is not caught — confirm callers only pass known-good locales.
    # it looks like some stuff isn't initialised yet when this is called the
    # first time and its init gets the locale settings from the environment
    os.environ["LC_ALL"] = newlocale


def formatTimestamp(timestamp=None):
    """Return a formatted timestamp; falls back to "now" on bad input"""
    # For some reason some timestamps are strings so we need to sanitize.
    if timestamp is not None and not isinstance(timestamp, int):
        try:
            timestamp = int(timestamp)
        except (ValueError, TypeError):
            timestamp = None

    # timestamp can't be less than 0.
    if timestamp is not None and timestamp < 0:
        timestamp = None

    if timestamp is None:
        timestring = time.strftime(time_format)
    else:
        # In case timestamp is too far in the future
        try:
            timestring = time.strftime(time_format, time.localtime(timestamp))
        except ValueError:
            timestring = time.strftime(time_format)

    # Python 2 strftime returns bytes; decode with the detected encoding.
    if sys.version_info[0] == 2:
        return timestring.decode(encoding)
    return timestring


def getTranslationLanguage():
    """Return the user's language choice"""
    # 'system' (or an empty value) means: use the language detected
    # from the environment at import time.
    userlocale = BMConfigParser().safeGet(
        'bitmessagesettings', 'userlocale', 'system')
    return userlocale if userlocale and userlocale != 'system' else language
def getWindowsLocale(posixLocale):
    """
    Get the Windows locale
    Technically this converts the locale string from UNIX to Windows format,
    because they use different ones in their
    libraries. E.g. "en_EN.UTF-8" to "english".
    """
    # Try the full name first, then the name without the encoding suffix,
    # then the bare language code — same precedence as before.
    candidates = [posixLocale]
    for separator in (".", "_"):
        if separator in posixLocale:
            candidates.append(posixLocale.split(separator, 1)[0])
    for candidate in candidates:
        try:
            return windowsLanguageMap[candidate]
        except KeyError:
            continue
    # Nothing matched: fall back to the default language exactly once.
    if posixLocale != DEFAULT_LANGUAGE:
        return getWindowsLocale(DEFAULT_LANGUAGE)
    return None
class MultiQueue(queue.Queue):
    """A queue composed of several internal subqueues: puts go into a
    randomly chosen subqueue, gets read the current one (see iterate())."""
    # pylint: disable=redefined-builtin,attribute-defined-outside-init
    defaultQueueCount = 10

    def __init__(self, maxsize=0, count=0):
        # Zero/falsy count means "use the class default".
        self.queueCount = count if count else MultiQueue.defaultQueueCount
        queue.Queue.__init__(self, maxsize)

    def _init(self, maxsize):
        # Called by queue.Queue.__init__ to build the representation.
        self.iter = 0
        self.queues = [deque() for _ in range(self.queueCount)]

    def _qsize(self, len=len):
        # Size of the *current* subqueue only, per rotation semantics.
        return len(self.queues[self.iter])

    def _put(self, item):
        # Spread new items over the subqueues at random.
        self.queues[helper_random.randomrandrange(self.queueCount)].append(
            (item))

    def _get(self):
        return self.queues[self.iter].popleft()

    def iterate(self):
        """Increment the iteration counter"""
        self.iter = (self.iter + 1) % self.queueCount

    def totalSize(self):
        """Return the total number of items in all the queues"""
        return sum(len(sub) for sub in self.queues)
    def __init__(self, options=None):
        """
        Initialise. If options are given, take the connection settings from
        them instead of loading from the configs. This can be used to test
        currently entered connection settings in the config dialog without
        actually changing the values (yet).
        """
        if options is None:
            # No explicit options: read the persisted settings.
            self.nmctype = BMConfigParser().get(
                configSection, "namecoinrpctype")
            self.host = BMConfigParser().get(
                configSection, "namecoinrpchost")
            self.port = int(BMConfigParser().get(
                configSection, "namecoinrpcport"))
            self.user = BMConfigParser().get(
                configSection, "namecoinrpcuser")
            self.password = BMConfigParser().get(
                configSection, "namecoinrpcpassword")
        else:
            self.nmctype = options["type"]
            self.host = options["host"]
            self.port = int(options["port"])
            self.user = options["user"]
            self.password = options["password"]

        # Only these two backends are supported.
        assert self.nmctype == "namecoind" or self.nmctype == "nmcontrol"
        if self.nmctype == "namecoind":
            # namecoind speaks JSON-RPC over HTTP; keep one connection open.
            self.con = httplib.HTTPConnection(self.host, self.port, timeout=3)
    def query(self, identity):
        """
        Query for the bitmessage address corresponding to the given identity
        string. If it doesn't contain a slash, id/ is prepended. We return
        the result as (Error, Address) pair, where the Error is an error
        message to display or None in case of success.
        """
        # Default to the "id/" namespace when none was given.
        slashPos = identity.find("/")
        if slashPos < 0:
            display_name = identity
            identity = "id/" + identity
        else:
            display_name = identity.split("/")[1]

        try:
            if self.nmctype == "namecoind":
                res = self.callRPC("name_show", [identity])
                res = res["value"]
            elif self.nmctype == "nmcontrol":
                res = self.callRPC("data", ["getValue", identity])
                res = res["reply"]
                if not res:
                    return (_translate(
                        "MainWindow", "The name %1 was not found."
                    ).arg(identity.decode("utf-8", "ignore")), None)
            else:
                # Unknown backend; handled by the AssertionError branch below.
                assert False
        except RPCError as exc:
            logger.exception("Namecoin query RPC exception")
            if isinstance(exc.error, dict):
                errmsg = exc.error["message"]
            else:
                errmsg = exc.error
            return (_translate(
                "MainWindow", "The namecoin query failed (%1)"
            ).arg(errmsg.decode("utf-8", "ignore")), None)
        except AssertionError:
            return (_translate(
                "MainWindow", "Unknown namecoin interface type: %1"
            ).arg(self.nmctype.decode("utf-8", "ignore")), None)
        except Exception:
            logger.exception("Namecoin query exception")
            return (_translate(
                "MainWindow", "The namecoin query failed."), None)

        # The stored value may be plain text or a JSON document with
        # optional "name" and "bitmessage" keys.
        try:
            res = json.loads(res)
        except ValueError:
            pass
        else:
            try:
                display_name = res["name"]
            except KeyError:
                pass
            res = res.get("bitmessage")

        valid = decodeAddress(res)[0] == "success"
        return (
            None, "%s <%s>" % (display_name, res)
        ) if valid else (
            _translate(
                "MainWindow",
                "The name %1 has no associated Bitmessage address."
            ).arg(identity.decode("utf-8", "ignore")), None)
    def callRPC(self, method, params):
        """Helper routine that actually performs an JSON RPC call."""

        data = {"method": method, "params": params, "id": self.queryid}
        # namecoind is queried over HTTP, nmcontrol over a raw socket.
        if self.nmctype == "namecoind":
            resp = self.queryHTTP(json.dumps(data))
        elif self.nmctype == "nmcontrol":
            resp = self.queryServer(json.dumps(data))
        else:
            assert False
        val = json.loads(resp)

        # The response must echo the request id.
        if val["id"] != self.queryid:
            raise Exception("ID mismatch in JSON RPC answer.")

        if self.nmctype == "namecoind":
            # presumably only namecoind requires unique request ids —
            # TODO confirm why nmcontrol keeps a constant id.
            self.queryid = self.queryid + 1

        error = val["error"]
        if error is None:
            return val["result"]

        # A boolean error flag means the payload carries the real message.
        if isinstance(error, bool):
            raise RPCError(val["result"])
        raise RPCError(error)
def lookupNamecoinFolder():
    """
    Look up the namecoin data folder.

    .. todo:: Check whether this works on other platforms as well!
    """
    app = "namecoin"
    from os import path, environ

    if sys.platform == "darwin":
        # macOS keeps application data under ~/Library/Application Support.
        if "HOME" not in environ:
            sys.exit(
                "Could not find home folder, please report this message"
                " and your OS X version to the BitMessage Github."
            )
        return path.join(
            os.environ["HOME"], "Library/Application Support/", app) + "/"

    if "win32" in sys.platform or "win64" in sys.platform:
        # Windows: %APPDATA%\namecoin\
        return path.join(environ["APPDATA"], app) + "\\"

    # Any other POSIX platform: a dot-directory in the home folder.
    return path.join(environ["HOME"], ".%s" % app) + "/"
def ensureNamecoinOptions():
    """
    Ensure all namecoin options are set, by setting those to default values
    that aren't there.
    """

    if not BMConfigParser().has_option(configSection, "namecoinrpctype"):
        BMConfigParser().set(configSection, "namecoinrpctype", "namecoind")
    if not BMConfigParser().has_option(configSection, "namecoinrpchost"):
        BMConfigParser().set(configSection, "namecoinrpchost", "localhost")

    hasUser = BMConfigParser().has_option(configSection, "namecoinrpcuser")
    hasPass = BMConfigParser().has_option(configSection, "namecoinrpcpassword")
    hasPort = BMConfigParser().has_option(configSection, "namecoinrpcport")

    # Try to read user/password from .namecoin configuration file.
    defaultUser = ""
    defaultPass = ""
    nmcFolder = lookupNamecoinFolder()
    nmcConfig = nmcFolder + "namecoin.conf"
    try:
        nmc = open(nmcConfig, "r")

        # Parse simple key=value lines from namecoin.conf.
        while True:
            line = nmc.readline()
            if line == "":
                break
            parts = line.split("=")
            if len(parts) == 2:
                key = parts[0]
                val = parts[1].rstrip()

                # Only pick up credentials that aren't already configured.
                if key == "rpcuser" and not hasUser:
                    defaultUser = val
                if key == "rpcpassword" and not hasPass:
                    defaultPass = val
                if key == "rpcport":
                    defaults.namecoinDefaultRpcPort = val

        nmc.close()
    except IOError:
        # Missing config just means no local namecoind to talk to.
        logger.warning(
            "%s unreadable or missing, Namecoin support deactivated",
            nmcConfig)
    except Exception:
        logger.warning("Error processing namecoin.conf", exc_info=True)

    # If still nothing found, set empty at least.
    if not hasUser:
        BMConfigParser().set(configSection, "namecoinrpcuser", defaultUser)
    if not hasPass:
        BMConfigParser().set(configSection, "namecoinrpcpassword", defaultPass)

    # Set default port now, possibly to found value.
    if not hasPort:
        BMConfigParser().set(configSection, "namecoinrpcport",
                             defaults.namecoinDefaultRpcPort)
class AddrThread(StoppableThread):
    """(Node) address broadcasting thread"""
    name = "AddrBroadcaster"

    def run(self):
        while not state.shutdown:
            # Drain everything currently queued without blocking.
            pending = []
            while True:
                try:
                    pending.append(addrQueue.get(False))
                except queue.Empty:
                    break

            if pending:
                # Choose peers randomly
                connections = BMConnectionPool().establishedConnections()
                randomshuffle(connections)
                for conn in connections:
                    randomshuffle(pending)
                    # Drop the peer's own address, addresses received from
                    # that peer, and addresses outside its streams.
                    outgoing = [
                        (stream, peer, seen)
                        for stream, peer, seen, destination in pending
                        if conn.destination not in (peer, destination)
                        and stream in conn.streams
                    ]
                    if outgoing:
                        conn.append_write_buf(assemble_addr(outgoing))

                addrQueue.iterate()
                # One task_done per item drained above.
                for _ in pending:
                    addrQueue.task_done()
            self.stop.wait(1)
self.downloadChunk = 0 + + def append_write_buf(self, data): + """Append binary data to the end of stream write buffer.""" + if data: + if isinstance(data, list): + with self.writeLock: + for chunk in data: + self.write_buf.extend(chunk) + else: + with self.writeLock: + self.write_buf.extend(data) + + def slice_write_buf(self, length=0): + """Cut the beginning of the stream write buffer.""" + if length > 0: + with self.writeLock: + if length >= len(self.write_buf): + del self.write_buf[:] + else: + del self.write_buf[0:length] + + def slice_read_buf(self, length=0): + """Cut the beginning of the stream read buffer.""" + if length > 0: + with self.readLock: + if length >= len(self.read_buf): + del self.read_buf[:] + else: + del self.read_buf[0:length] + + def process(self): + """Process (parse) data that's in the buffer, + as long as there is enough data and the connection is open.""" + while self.connected and not state.shutdown: + try: + with nonBlocking(self.processingLock): + if not self.connected or state.shutdown: + break + if len(self.read_buf) < self.expectBytes: + return False + try: + cmd = getattr(self, "state_" + str(self.state)) + except AttributeError: + self.logger.error( + 'Unknown state %s', self.state, exc_info=True) + raise UnknownStateError(self.state) + if not cmd(): + break + except BusyError: + return False + return False + + def set_state(self, state_str, length=0, expectBytes=0): + """Set the next processing state.""" + self.expectBytes = expectBytes + self.slice_read_buf(length) + self.state = state_str + + def writable(self): + """Is data from the write buffer ready to be sent to the network?""" + self.uploadChunk = AdvancedDispatcher._buf_len + if asyncore.maxUploadRate > 0: + self.uploadChunk = int(asyncore.uploadBucket) + self.uploadChunk = min(self.uploadChunk, len(self.write_buf)) + return asyncore.dispatcher.writable(self) and ( + self.connecting or ( + self.connected and self.uploadChunk > 0)) + + def readable(self): + """Is the 
read buffer ready to accept data from the network?""" + self.downloadChunk = AdvancedDispatcher._buf_len + if asyncore.maxDownloadRate > 0: + self.downloadChunk = int(asyncore.downloadBucket) + try: + if self.expectBytes > 0 and not self.fullyEstablished: + self.downloadChunk = min( + self.downloadChunk, self.expectBytes - len(self.read_buf)) + if self.downloadChunk < 0: + self.downloadChunk = 0 + except AttributeError: + pass + return asyncore.dispatcher.readable(self) and ( + self.connecting or self.accepting or ( + self.connected and self.downloadChunk > 0)) + + def handle_read(self): + """Append incoming data to the read buffer.""" + self.lastTx = time.time() + newData = self.recv(self.downloadChunk) + self.receivedBytes += len(newData) + asyncore.update_received(len(newData)) + with self.readLock: + self.read_buf.extend(newData) + + def handle_write(self): + """Send outgoing data from write buffer.""" + self.lastTx = time.time() + written = self.send(self.write_buf[0:self.uploadChunk]) + asyncore.update_sent(written) + self.sentBytes += written + self.slice_write_buf(written) + + def handle_connect_event(self): + """Callback for connection established event.""" + try: + asyncore.dispatcher.handle_connect_event(self) + except socket.error as e: + # pylint: disable=protected-access + if e.args[0] not in asyncore._DISCONNECTED: + raise + + def handle_connect(self): + """Method for handling connection established implementations.""" + self.lastTx = time.time() + + def state_close(self): # pylint: disable=no-self-use + """Signal to the processing loop to end.""" + return False + + def handle_close(self): + """Callback for connection being closed, + but can also be called directly when you want connection to close.""" + with self.readLock: + self.read_buf = bytearray() + with self.writeLock: + self.write_buf = bytearray() + self.set_state("close") + self.close() diff --git a/src/tests/mock/pybitmessage/network/announcethread.py 
b/src/tests/mock/pybitmessage/network/announcethread.py new file mode 100644 index 00000000..e34ed963 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/announcethread.py @@ -0,0 +1,43 @@ +""" +Announce myself (node address) +""" +import time + +import state +from bmconfigparser import BMConfigParser +from network.assemble import assemble_addr +from network.connectionpool import BMConnectionPool +from node import Peer +from threads import StoppableThread + + +class AnnounceThread(StoppableThread): + """A thread to manage regular announcing of this node""" + name = "Announcer" + announceInterval = 60 + + def run(self): + lastSelfAnnounced = 0 + while not self._stopped and state.shutdown == 0: + processed = 0 + if lastSelfAnnounced < time.time() - self.announceInterval: + self.announceSelf() + lastSelfAnnounced = time.time() + if processed == 0: + self.stop.wait(10) + + @staticmethod + def announceSelf(): + """Announce our presence""" + for connection in BMConnectionPool().udpSockets.values(): + if not connection.announcing: + continue + for stream in state.streamsInWhichIAmParticipating: + addr = ( + stream, + Peer( + '127.0.0.1', + BMConfigParser().safeGetInt( + 'bitmessagesettings', 'port')), + time.time()) + connection.append_write_buf(assemble_addr([addr])) diff --git a/src/tests/mock/pybitmessage/network/assemble.py b/src/tests/mock/pybitmessage/network/assemble.py new file mode 100644 index 00000000..599559a0 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/assemble.py @@ -0,0 +1,31 @@ +""" +Create bitmessage protocol command packets +""" +import struct + +from pybitmessage import addresses +from pybitmessage.network.constants import MAX_ADDR_COUNT +from pybitmessage.network.node import Peer +from pybitmessage.protocol import CreatePacket, encodeHost + + +def assemble_addr(peerList): + """Create address command""" + if isinstance(peerList, Peer): + peerList = [peerList] + if not peerList: + return b'' + retval = b'' + for i in range(0, 
len(peerList), MAX_ADDR_COUNT): + payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) + for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: + # 64-bit time + payload += struct.pack('>Q', timestamp) + payload += struct.pack('>I', stream) + # service bit flags offered by this node + payload += struct.pack('>q', 1) + payload += encodeHost(peer.host) + # remote port + payload += struct.pack('>H', peer.port) + retval += CreatePacket('addr', payload) + return retval diff --git a/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py b/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py new file mode 100644 index 00000000..2265ab3b --- /dev/null +++ b/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py @@ -0,0 +1,1012 @@ +""" +Basic infrastructure for asynchronous socket service clients and servers. +""" +# -*- Mode: Python -*- +# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp +# Author: Sam Rushing +# pylint: disable=too-many-branches,too-many-lines,global-statement +# pylint: disable=redefined-builtin,no-self-use +import os +import select +import socket +import sys +import time +import warnings +from errno import ( + EADDRINUSE, EAGAIN, EALREADY, EBADF, ECONNABORTED, ECONNREFUSED, + ECONNRESET, EHOSTUNREACH, EINPROGRESS, EINTR, EINVAL, EISCONN, ENETUNREACH, + ENOTCONN, ENOTSOCK, EPIPE, ESHUTDOWN, ETIMEDOUT, EWOULDBLOCK, errorcode +) +from threading import current_thread + +from pybitmessage import helper_random + +try: + from errno import WSAEWOULDBLOCK +except (ImportError, AttributeError): + WSAEWOULDBLOCK = EWOULDBLOCK +try: + from errno import WSAENOTSOCK +except (ImportError, AttributeError): + WSAENOTSOCK = ENOTSOCK +try: + from errno import WSAECONNRESET +except (ImportError, AttributeError): + WSAECONNRESET = ECONNRESET +try: + # Desirable side-effects on Windows; imports winsock error numbers + from errno import WSAEADDRINUSE # pylint: disable=unused-import +except (ImportError, AttributeError): + 
WSAEADDRINUSE = EADDRINUSE + + +_DISCONNECTED = frozenset(( + ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, EBADF, ECONNREFUSED, + EHOSTUNREACH, ENETUNREACH, ETIMEDOUT, WSAECONNRESET)) + +OP_READ = 1 +OP_WRITE = 2 + +try: + socket_map +except NameError: + socket_map = {} + + +def _strerror(err): + try: + return os.strerror(err) + except (ValueError, OverflowError, NameError): + if err in errorcode: + return errorcode[err] + return "Unknown error %s" % err + + +class ExitNow(Exception): + """We don't use directly but may be necessary as we replace + asyncore due to some library raising or expecting it""" + pass + + +_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit) + +maxDownloadRate = 0 +downloadTimestamp = 0 +downloadBucket = 0 +receivedBytes = 0 +maxUploadRate = 0 +uploadTimestamp = 0 +uploadBucket = 0 +sentBytes = 0 + + +def read(obj): + """Event to read from the object, i.e. its network socket.""" + + if not can_receive(): + return + try: + obj.handle_read_event() + except _reraised_exceptions: + raise + except BaseException: + obj.handle_error() + + +def write(obj): + """Event to write to the object, i.e. 
its network socket.""" + + if not can_send(): + return + try: + obj.handle_write_event() + except _reraised_exceptions: + raise + except BaseException: + obj.handle_error() + + +def set_rates(download, upload): + """Set throttling rates""" + + global maxDownloadRate, maxUploadRate, downloadBucket + global uploadBucket, downloadTimestamp, uploadTimestamp + + maxDownloadRate = float(download) * 1024 + maxUploadRate = float(upload) * 1024 + downloadBucket = maxDownloadRate + uploadBucket = maxUploadRate + downloadTimestamp = time.time() + uploadTimestamp = time.time() + + +def can_receive(): + """Predicate indicating whether the download throttle is in effect""" + + return maxDownloadRate == 0 or downloadBucket > 0 + + +def can_send(): + """Predicate indicating whether the upload throttle is in effect""" + + return maxUploadRate == 0 or uploadBucket > 0 + + +def update_received(download=0): + """Update the receiving throttle""" + + global receivedBytes, downloadBucket, downloadTimestamp + + currentTimestamp = time.time() + receivedBytes += download + if maxDownloadRate > 0: + bucketIncrease = \ + maxDownloadRate * (currentTimestamp - downloadTimestamp) + downloadBucket += bucketIncrease + if downloadBucket > maxDownloadRate: + downloadBucket = int(maxDownloadRate) + downloadBucket -= download + downloadTimestamp = currentTimestamp + + +def update_sent(upload=0): + """Update the sending throttle""" + + global sentBytes, uploadBucket, uploadTimestamp + + currentTimestamp = time.time() + sentBytes += upload + if maxUploadRate > 0: + bucketIncrease = maxUploadRate * (currentTimestamp - uploadTimestamp) + uploadBucket += bucketIncrease + if uploadBucket > maxUploadRate: + uploadBucket = int(maxUploadRate) + uploadBucket -= upload + uploadTimestamp = currentTimestamp + + +def _exception(obj): + """Handle exceptions as appropriate""" + + try: + obj.handle_expt_event() + except _reraised_exceptions: + raise + except BaseException: + obj.handle_error() + + +def readwrite(obj, 
flags): + """Read and write any pending data to/from the object""" + + try: + if flags & select.POLLIN and can_receive(): + obj.handle_read_event() + if flags & select.POLLOUT and can_send(): + obj.handle_write_event() + if flags & select.POLLPRI: + obj.handle_expt_event() + if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): + obj.handle_close() + except socket.error as e: + if e.args[0] not in _DISCONNECTED: + obj.handle_error() + else: + obj.handle_close() + except _reraised_exceptions: + raise + except BaseException: + obj.handle_error() + + +def select_poller(timeout=0.0, map=None): + """A poller which uses select(), available on most platforms.""" + + if map is None: + map = socket_map + if map: + r = [] + w = [] + e = [] + for fd, obj in list(map.items()): + is_r = obj.readable() + is_w = obj.writable() + if is_r: + r.append(fd) + # accepting sockets should not be writable + if is_w and not obj.accepting: + w.append(fd) + if is_r or is_w: + e.append(fd) + if [] == r == w == e: + time.sleep(timeout) + return + + try: + r, w, e = select.select(r, w, e, timeout) + except KeyboardInterrupt: + return + except socket.error as err: + if err.args[0] in (EBADF, EINTR): + return + except Exception as err: + if err.args[0] in (WSAENOTSOCK, ): + return + + for fd in helper_random.randomsample(r, len(r)): + obj = map.get(fd) + if obj is None: + continue + read(obj) + + for fd in helper_random.randomsample(w, len(w)): + obj = map.get(fd) + if obj is None: + continue + write(obj) + + for fd in e: + obj = map.get(fd) + if obj is None: + continue + _exception(obj) + else: + current_thread().stop.wait(timeout) + + +def poll_poller(timeout=0.0, map=None): + """A poller which uses poll(), available on most UNIXen.""" + + if map is None: + map = socket_map + if timeout is not None: + # timeout is in milliseconds + timeout = int(timeout * 1000) + try: + poll_poller.pollster + except AttributeError: + poll_poller.pollster = select.poll() + if map: + for fd, obj in 
list(map.items()): + flags = newflags = 0 + if obj.readable(): + flags |= select.POLLIN | select.POLLPRI + newflags |= OP_READ + else: + newflags &= ~ OP_READ + # accepting sockets should not be writable + if obj.writable() and not obj.accepting: + flags |= select.POLLOUT + newflags |= OP_WRITE + else: + newflags &= ~ OP_WRITE + if newflags != obj.poller_flags: + obj.poller_flags = newflags + try: + if obj.poller_registered: + poll_poller.pollster.modify(fd, flags) + else: + poll_poller.pollster.register(fd, flags) + obj.poller_registered = True + except IOError: + pass + try: + r = poll_poller.pollster.poll(timeout) + except KeyboardInterrupt: + r = [] + except socket.error as err: + if err.args[0] in (EBADF, WSAENOTSOCK, EINTR): + return + for fd, flags in helper_random.randomsample(r, len(r)): + obj = map.get(fd) + if obj is None: + continue + readwrite(obj, flags) + else: + current_thread().stop.wait(timeout) + + +# Aliases for backward compatibility +poll = select_poller +poll2 = poll3 = poll_poller + + +def epoll_poller(timeout=0.0, map=None): + """A poller which uses epoll(), supported on Linux 2.5.44 and newer.""" + + if map is None: + map = socket_map + try: + epoll_poller.pollster + except AttributeError: + epoll_poller.pollster = select.epoll() + if map: + for fd, obj in map.items(): + flags = newflags = 0 + if obj.readable(): + flags |= select.POLLIN | select.POLLPRI + newflags |= OP_READ + else: + newflags &= ~ OP_READ + # accepting sockets should not be writable + if obj.writable() and not obj.accepting: + flags |= select.POLLOUT + newflags |= OP_WRITE + else: + newflags &= ~ OP_WRITE + if newflags != obj.poller_flags: + obj.poller_flags = newflags + # Only check for exceptions if object was either readable + # or writable. 
+ flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL + try: + if obj.poller_registered: + epoll_poller.pollster.modify(fd, flags) + else: + epoll_poller.pollster.register(fd, flags) + obj.poller_registered = True + except IOError: + pass + try: + r = epoll_poller.pollster.poll(timeout) + except IOError as e: + if e.errno != EINTR: + raise + r = [] + except select.error as err: + if err.args[0] != EINTR: + raise + r = [] + for fd, flags in helper_random.randomsample(r, len(r)): + obj = map.get(fd) + if obj is None: + continue + readwrite(obj, flags) + else: + current_thread().stop.wait(timeout) + + +def kqueue_poller(timeout=0.0, map=None): + """A poller which uses kqueue(), BSD specific.""" + # pylint: disable=no-member,too-many-statements + + if map is None: + map = socket_map + try: + kqueue_poller.pollster + except AttributeError: + kqueue_poller.pollster = select.kqueue() + if map: + updates = [] + selectables = 0 + for fd, obj in map.items(): + kq_filter = 0 + if obj.readable(): + kq_filter |= 1 + selectables += 1 + if obj.writable() and not obj.accepting: + kq_filter |= 2 + selectables += 1 + if kq_filter != obj.poller_filter: + # unlike other pollers, READ and WRITE aren't OR able but have + # to be set and checked separately + if kq_filter & 1 != obj.poller_filter & 1: + poller_flags = select.KQ_EV_ADD + if kq_filter & 1: + poller_flags |= select.KQ_EV_ENABLE + else: + poller_flags |= select.KQ_EV_DISABLE + updates.append( + select.kevent( + fd, filter=select.KQ_FILTER_READ, + flags=poller_flags)) + if kq_filter & 2 != obj.poller_filter & 2: + poller_flags = select.KQ_EV_ADD + if kq_filter & 2: + poller_flags |= select.KQ_EV_ENABLE + else: + poller_flags |= select.KQ_EV_DISABLE + updates.append( + select.kevent( + fd, filter=select.KQ_FILTER_WRITE, + flags=poller_flags)) + obj.poller_filter = kq_filter + + if not selectables: + # unlike other pollers, kqueue poll does not wait if there are no + # filters setup + current_thread().stop.wait(timeout) + 
return + + events = kqueue_poller.pollster.control(updates, selectables, timeout) + if len(events) > 1: + events = helper_random.randomsample(events, len(events)) + + for event in events: + fd = event.ident + obj = map.get(fd) + if obj is None: + continue + if event.flags & select.KQ_EV_ERROR: + _exception(obj) + continue + if event.flags & select.KQ_EV_EOF and event.data and event.fflags: + obj.handle_close() + continue + if event.filter == select.KQ_FILTER_READ: + read(obj) + if event.filter == select.KQ_FILTER_WRITE: + write(obj) + else: + current_thread().stop.wait(timeout) + + +def loop(timeout=30.0, use_poll=False, map=None, count=None, poller=None): + """Poll in a loop, until count or timeout is reached""" + + if map is None: + map = socket_map + if count is None: + count = True + # code which grants backward compatibility with "use_poll" + # argument which should no longer be used in favor of + # "poller" + + if poller is None: + if use_poll: + poller = poll_poller + elif hasattr(select, 'epoll'): + poller = epoll_poller + elif hasattr(select, 'kqueue'): + poller = kqueue_poller + elif hasattr(select, 'poll'): + poller = poll_poller + elif hasattr(select, 'select'): + poller = select_poller + + if timeout == 0: + deadline = 0 + else: + deadline = time.time() + timeout + while count: + # fill buckets first + update_sent() + update_received() + subtimeout = deadline - time.time() + if subtimeout <= 0: + break + # then poll + poller(subtimeout, map) + if isinstance(count, int): + count = count - 1 + + +class dispatcher(object): + """Dispatcher for socket objects""" + # pylint: disable=too-many-public-methods,too-many-instance-attributes + + debug = False + connected = False + accepting = False + connecting = False + closing = False + addr = None + ignore_log_types = frozenset(['warning']) + poller_registered = False + poller_flags = 0 + # don't do network IO with a smaller bucket than this + minTx = 1500 + + def __init__(self, sock=None, map=None): + if map is 
None: + self._map = socket_map + else: + self._map = map + + self._fileno = None + + if sock: + # Set to nonblocking just to make sure for cases where we + # get a socket from a blocking source. + sock.setblocking(0) + self.set_socket(sock, map) + self.connected = True + # The constructor no longer requires that the socket + # passed be connected. + try: + self.addr = sock.getpeername() + except socket.error as err: + if err.args[0] in (ENOTCONN, EINVAL): + # To handle the case where we got an unconnected + # socket. + self.connected = False + else: + # The socket is broken in some unknown way, alert + # the user and remove it from the map (to prevent + # polling of broken sockets). + self.del_channel(map) + raise + else: + self.socket = None + + def __repr__(self): + status = [self.__class__.__module__ + "." + self.__class__.__name__] + if self.accepting and self.addr: + status.append('listening') + elif self.connected: + status.append('connected') + if self.addr is not None: + try: + status.append('%s:%d' % self.addr) + except TypeError: + status.append(repr(self.addr)) + return '<%s at %#x>' % (' '.join(status), id(self)) + + __str__ = __repr__ + + def add_channel(self, map=None): + """Add a channel""" + # pylint: disable=attribute-defined-outside-init + if map is None: + map = self._map + map[self._fileno] = self + self.poller_flags = 0 + self.poller_filter = 0 + + def del_channel(self, map=None): + """Delete a channel""" + fd = self._fileno + if map is None: + map = self._map + if fd in map: + del map[fd] + if self._fileno: + try: + kqueue_poller.pollster.control([select.kevent( + fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0) + except(AttributeError, KeyError, TypeError, IOError, OSError): + pass + try: + kqueue_poller.pollster.control([select.kevent( + fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0) + except(AttributeError, KeyError, TypeError, IOError, OSError): + pass + try: + epoll_poller.pollster.unregister(fd) + except (AttributeError, 
KeyError, TypeError, IOError): + # no epoll used, or not registered + pass + try: + poll_poller.pollster.unregister(fd) + except (AttributeError, KeyError, TypeError, IOError): + # no poll used, or not registered + pass + self._fileno = None + self.poller_flags = 0 + self.poller_filter = 0 + self.poller_registered = False + + def create_socket( + self, family=socket.AF_INET, socket_type=socket.SOCK_STREAM): + """Create a socket""" + # pylint: disable=attribute-defined-outside-init + self.family_and_type = family, socket_type + sock = socket.socket(family, socket_type) + sock.setblocking(0) + self.set_socket(sock) + + def set_socket(self, sock, map=None): + """Set socket""" + self.socket = sock + self._fileno = sock.fileno() + self.add_channel(map) + + def set_reuse_addr(self): + """try to re-use a server port if possible""" + try: + self.socket.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 + ) + except socket.error: + pass + + # ================================================== + # predicates for select() + # these are used as filters for the lists of sockets + # to pass to select(). + # ================================================== + + def readable(self): + """Predicate to indicate download throttle status""" + if maxDownloadRate > 0: + return downloadBucket > dispatcher.minTx + return True + + def writable(self): + """Predicate to indicate upload throttle status""" + if maxUploadRate > 0: + return uploadBucket > dispatcher.minTx + return True + + # ================================================== + # socket object methods. 
+ # ================================================== + + def listen(self, num): + """Listen on a port""" + self.accepting = True + if os.name == 'nt' and num > 5: + num = 5 + return self.socket.listen(num) + + def bind(self, addr): + """Bind to an address""" + self.addr = addr + return self.socket.bind(addr) + + def connect(self, address): + """Connect to an address""" + self.connected = False + self.connecting = True + err = self.socket.connect_ex(address) + if err in (EINPROGRESS, EALREADY, EWOULDBLOCK, WSAEWOULDBLOCK) \ + or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address + return + if err in (0, EISCONN): + self.addr = address + self.handle_connect_event() + else: + raise socket.error(err, errorcode[err]) + + def accept(self): + """Accept incoming connections. + Returns either an address pair or None.""" + try: + conn, addr = self.socket.accept() + except TypeError: + return None + except socket.error as why: + if why.args[0] in ( + EWOULDBLOCK, WSAEWOULDBLOCK, ECONNABORTED, + EAGAIN, ENOTCONN): + return None + else: + raise + else: + return conn, addr + + def send(self, data): + """Send data""" + try: + result = self.socket.send(data) + return result + except socket.error as why: + if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK): + return 0 + elif why.args[0] in _DISCONNECTED: + self.handle_close() + return 0 + else: + raise + + def recv(self, buffer_size): + """Receive data""" + try: + data = self.socket.recv(buffer_size) + if not data: + # a closed connection is indicated by signaling + # a read condition, and having recv() return 0. 
+ self.handle_close() + return b'' + return data + except socket.error as why: + # winsock sometimes raises ENOTCONN + if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK): + return b'' + if why.args[0] in _DISCONNECTED: + self.handle_close() + return b'' + else: + raise + + def close(self): + """Close connection""" + self.connected = False + self.accepting = False + self.connecting = False + self.del_channel() + try: + self.socket.close() + except socket.error as why: + if why.args[0] not in (ENOTCONN, EBADF): + raise + + # cheap inheritance, used to pass all other attribute + # references to the underlying socket object. + def __getattr__(self, attr): + try: + retattr = getattr(self.socket, attr) + except AttributeError: + raise AttributeError( + "%s instance has no attribute '%s'" + % (self.__class__.__name__, attr)) + else: + msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s"\ + " instead" % {'me': self.__class__.__name__, 'attr': attr} + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return retattr + + # log and log_info may be overridden to provide more sophisticated + # logging and warning methods. In general, log is for 'hit' logging + # and 'log_info' is for informational, warning and error logging. 
+ + def log(self, message): + """Log a message to stderr""" + sys.stderr.write('log: %s\n' % str(message)) + + def log_info(self, message, log_type='info'): + """Conditionally print a message""" + if log_type not in self.ignore_log_types: + print('%s: %s' % (log_type, message)) + + def handle_read_event(self): + """Handle a read event""" + if self.accepting: + # accepting sockets are never connected, they "spawn" new + # sockets that are connected + self.handle_accept() + elif not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_read() + else: + self.handle_read() + + def handle_connect_event(self): + """Handle a connection event""" + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise socket.error(err, _strerror(err)) + self.handle_connect() + self.connected = True + self.connecting = False + + def handle_write_event(self): + """Handle a write event""" + if self.accepting: + # Accepting sockets shouldn't get a write event. + # We will pretend it didn't happen. + return + + if not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_write() + + def handle_expt_event(self): + """Handle expected exceptions""" + # handle_expt_event() is called if there might be an error on the + # socket, or if there is OOB data + # check for the error condition first + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # we can get here when select.select() says that there is an + # exceptional condition on the socket + # since there is an error, we'll go ahead and close the socket + # like we would in a subclassed handle_read() that received no + # data + self.handle_close() + elif sys.platform.startswith("win"): + # async connect failed + self.handle_close() + else: + self.handle_expt() + + def handle_error(self): + """Handle unexpected exceptions""" + _, t, v, tbinfo = compact_traceback() + + # sometimes a user repr method will crash. 
+ try: + self_repr = repr(self) + except BaseException: + self_repr = '<__repr__(self) failed for object at %0x>' % id(self) + + self.log_info( + 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( + self_repr, t, v, tbinfo), + 'error') + self.handle_close() + + def handle_accept(self): + """Handle an accept event""" + pair = self.accept() + if pair is not None: + self.handle_accepted(*pair) + + def handle_expt(self): + """Log that the subclass does not implement handle_expt""" + self.log_info('unhandled incoming priority event', 'warning') + + def handle_read(self): + """Log that the subclass does not implement handle_read""" + self.log_info('unhandled read event', 'warning') + + def handle_write(self): + """Log that the subclass does not implement handle_write""" + self.log_info('unhandled write event', 'warning') + + def handle_connect(self): + """Log that the subclass does not implement handle_connect""" + self.log_info('unhandled connect event', 'warning') + + def handle_accepted(self, sock, addr): + """Log that the subclass does not implement handle_accepted""" + sock.close() + self.log_info('unhandled accepted event on %s' % (addr), 'warning') + + def handle_close(self): + """Log that the subclass does not implement handle_close""" + self.log_info('unhandled close event', 'warning') + self.close() + + +class dispatcher_with_send(dispatcher): + """ + adds simple buffered output capability, useful for simple clients. 
+ [for more sophisticated usage use asynchat.async_chat] + """ + + def __init__(self, sock=None, map=None): + dispatcher.__init__(self, sock, map) + self.out_buffer = b'' + + def initiate_send(self): + """Initiate a send""" + num_sent = 0 + num_sent = dispatcher.send(self, self.out_buffer[:512]) + self.out_buffer = self.out_buffer[num_sent:] + + def handle_write(self): + """Handle a write event""" + self.initiate_send() + + def writable(self): + """Predicate to indicate if the object is writable""" + return not self.connected or len(self.out_buffer) + + def send(self, data): + """Send data""" + if self.debug: + self.log_info('sending %s' % repr(data)) + self.out_buffer = self.out_buffer + data + self.initiate_send() + + +# --------------------------------------------------------------------------- +# used for debugging. +# --------------------------------------------------------------------------- + + +def compact_traceback(): + """Return a compact traceback""" + t, v, tb = sys.exc_info() + tbinfo = [] + # Must have a traceback + if not tb: + raise AssertionError("traceback does not exist") + while tb: + tbinfo.append(( + tb.tb_frame.f_code.co_filename, + tb.tb_frame.f_code.co_name, + str(tb.tb_lineno) + )) + tb = tb.tb_next + + # just to be safe + del tb + + filename, function, line = tbinfo[-1] + info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) + return (filename, function, line), t, v, info + + +def close_all(map=None, ignore_all=False): + """Close all connections""" + + if map is None: + map = socket_map + for x in list(map.values()): + try: + x.close() + except OSError as e: + if e.args[0] == EBADF: + pass + elif not ignore_all: + raise + except _reraised_exceptions: + raise + except BaseException: + if not ignore_all: + raise + map.clear() + + +# Asynchronous File I/O: +# +# After a little research (reading man pages on various unixen, and +# digging through the linux kernel), I've determined that select() +# isn't meant for doing asynchronous file i/o. 
+# Heartening, though - reading linux/mm/filemap.c shows that linux +# supports asynchronous read-ahead. So _MOST_ of the time, the data +# will be sitting in memory for us already when we go to read it. +# +# What other OS's (besides NT) support async file i/o? [VMS?] +# +# Regardless, this is useful for pipes, and stdin/stdout... + + +if os.name == 'posix': + import fcntl + + class file_wrapper: # pylint: disable=old-style-class + """ + Here we override just enough to make a file look + like a socket for the purposes of asyncore. + + The passed fd is automatically os.dup()'d + """ + + def __init__(self, fd): + self.fd = os.dup(fd) + + def recv(self, *args): + """Fake recv()""" + return os.read(self.fd, *args) + + def send(self, *args): + """Fake send()""" + return os.write(self.fd, *args) + + def getsockopt(self, level, optname, buflen=None): + """Fake getsockopt()""" + if (level == socket.SOL_SOCKET and optname == socket.SO_ERROR + and not buflen): + return 0 + raise NotImplementedError( + "Only asyncore specific behaviour implemented.") + + read = recv + write = send + + def close(self): + """Fake close()""" + os.close(self.fd) + + def fileno(self): + """Fake fileno()""" + return self.fd + + class file_dispatcher(dispatcher): + """A dispatcher for file_wrapper objects""" + + def __init__(self, fd, map=None): + dispatcher.__init__(self, None, map) + self.connected = True + try: + fd = fd.fileno() + except AttributeError: + pass + self.set_file(fd) + # set it to non-blocking mode + flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0) + flags = flags | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + + def set_file(self, fd): + """Set file""" + self.socket = file_wrapper(fd) + self._fileno = self.socket.fileno() + self.add_channel() diff --git a/src/tests/mock/pybitmessage/network/bmobject.py b/src/tests/mock/pybitmessage/network/bmobject.py new file mode 100644 index 00000000..12b997d7 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/bmobject.py @@ -0,0 +1,164 
@@ +""" +BMObject and it's exceptions. +""" +import logging +import time + +import protocol +import state +from addresses import calculateInventoryHash +from inventory import Inventory +from network.dandelion import Dandelion + +logger = logging.getLogger('default') + + +class BMObjectInsufficientPOWError(Exception): + """Exception indicating the object + doesn't have sufficient proof of work.""" + errorCodes = ("Insufficient proof of work") + + +class BMObjectInvalidDataError(Exception): + """Exception indicating the data being parsed + does not match the specification.""" + errorCodes = ("Data invalid") + + +class BMObjectExpiredError(Exception): + """Exception indicating the object's lifetime has expired.""" + errorCodes = ("Object expired") + + +class BMObjectUnwantedStreamError(Exception): + """Exception indicating the object is in a stream + we didn't advertise as being interested in.""" + errorCodes = ("Object in unwanted stream") + + +class BMObjectInvalidError(Exception): + """The object's data does not match object specification.""" + errorCodes = ("Invalid object") + + +class BMObjectAlreadyHaveError(Exception): + """We received a duplicate object (one we already have)""" + errorCodes = ("Already have this object") + + +class BMObject(object): # pylint: disable=too-many-instance-attributes + """Bitmessage Object as a class.""" + + # max TTL, 28 days and 3 hours + maxTTL = 28 * 24 * 60 * 60 + 10800 + # min TTL, 3 hour (in the past + minTTL = -3600 + + def __init__( + self, + nonce, + expiresTime, + objectType, + version, + streamNumber, + data, + payloadOffset + ): # pylint: disable=too-many-arguments + self.nonce = nonce + self.expiresTime = expiresTime + self.objectType = objectType + self.version = version + self.streamNumber = streamNumber + self.inventoryHash = calculateInventoryHash(data) + # copy to avoid memory issues + self.data = bytearray(data) + self.tag = self.data[payloadOffset:payloadOffset + 32] + + def checkProofOfWorkSufficient(self): + 
"""Perform a proof of work check for sufficiency.""" + # Let us check to make sure that the proof of work is sufficient. + if not protocol.isProofOfWorkSufficient(self.data): + logger.info('Proof of work is insufficient.') + raise BMObjectInsufficientPOWError() + + def checkEOLSanity(self): + """Check if object's lifetime + isn't ridiculously far in the past or future.""" + # EOL sanity check + if self.expiresTime - int(time.time()) > BMObject.maxTTL: + logger.info( + 'This object\'s End of Life time is too far in the future.' + ' Ignoring it. Time is %i', self.expiresTime) + # .. todo:: remove from download queue + raise BMObjectExpiredError() + + if self.expiresTime - int(time.time()) < BMObject.minTTL: + logger.info( + 'This object\'s End of Life time was too long ago.' + ' Ignoring the object. Time is %i', self.expiresTime) + # .. todo:: remove from download queue + raise BMObjectExpiredError() + + def checkStream(self): + """Check if object's stream matches streams we are interested in""" + if self.streamNumber not in state.streamsInWhichIAmParticipating: + logger.debug( + 'The streamNumber %i isn\'t one we are interested in.', + self.streamNumber) + raise BMObjectUnwantedStreamError() + + def checkAlreadyHave(self): + """ + Check if we already have the object + (so that we don't duplicate it in inventory + or advertise it unnecessarily) + """ + # if it's a stem duplicate, pretend we don't have it + if Dandelion().hasHash(self.inventoryHash): + return + if self.inventoryHash in Inventory(): + raise BMObjectAlreadyHaveError() + + def checkObjectByType(self): + """Call a object type specific check + (objects can have additional checks based on their types)""" + if self.objectType == protocol.OBJECT_GETPUBKEY: + self.checkGetpubkey() + elif self.objectType == protocol.OBJECT_PUBKEY: + self.checkPubkey() + elif self.objectType == protocol.OBJECT_MSG: + self.checkMessage() + elif self.objectType == protocol.OBJECT_BROADCAST: + self.checkBroadcast() + # other 
objects don't require other types of tests + + def checkMessage(self): # pylint: disable=no-self-use + """"Message" object type checks.""" + return + + def checkGetpubkey(self): + """"Getpubkey" object type checks.""" + if len(self.data) < 42: + logger.info( + 'getpubkey message doesn\'t contain enough data. Ignoring.') + raise BMObjectInvalidError() + + def checkPubkey(self): + """"Pubkey" object type checks.""" + # sanity check + if len(self.data) < 146 or len(self.data) > 440: + logger.info('pubkey object too short or too long. Ignoring.') + raise BMObjectInvalidError() + + def checkBroadcast(self): + """"Broadcast" object type checks.""" + if len(self.data) < 180: + logger.debug( + 'The payload length of this broadcast' + ' packet is unreasonably low. Someone is probably' + ' trying funny business. Ignoring message.') + raise BMObjectInvalidError() + + # this isn't supported anymore + if self.version < 2: + raise BMObjectInvalidError() diff --git a/src/tests/mock/pybitmessage/network/bmproto.py b/src/tests/mock/pybitmessage/network/bmproto.py new file mode 100644 index 00000000..3d54b33c --- /dev/null +++ b/src/tests/mock/pybitmessage/network/bmproto.py @@ -0,0 +1,709 @@ +""" +Class BMProto defines bitmessage's network protocol workflow. 
+""" + +import base64 +import hashlib +import logging +import re +import socket +import struct +import time +from binascii import hexlify + +from pybitmessage import addresses +import connectionpool +import knownnodes +from pybitmessage import protocol +from pybitmessage import state +from pybitmessage.bmconfigparser import BMConfigParser +from pybitmessage.inventory import Inventory +from pybitmessage.network.advanceddispatcher import AdvancedDispatcher +from pybitmessage.network.bmobject import ( + BMObject, BMObjectAlreadyHaveError, BMObjectExpiredError, + BMObjectInsufficientPOWError, BMObjectInvalidDataError, + BMObjectInvalidError, BMObjectUnwantedStreamError +) +from pybitmessage.network.constants import ( + ADDRESS_ALIVE, MAX_MESSAGE_SIZE, MAX_OBJECT_COUNT, + MAX_OBJECT_PAYLOAD_SIZE, MAX_TIME_OFFSET +) +from pybitmessage.network.dandelion import Dandelion +from pybitmessage.network.proxy import ProxyError +from node import Node, Peer +from objectracker import ObjectTracker, missingObjects +from pybitmessage.queues import invQueue, objectProcessorQueue, portCheckerQueue +from randomtrackingdict import RandomTrackingDict + +logger = logging.getLogger('default') + + +class BMProtoError(ProxyError): + """A Bitmessage Protocol Base Error""" + errorCodes = ("Protocol error") + + +class BMProtoInsufficientDataError(BMProtoError): + """A Bitmessage Protocol Insufficient Data Error""" + errorCodes = ("Insufficient data") + + +class BMProtoExcessiveDataError(BMProtoError): + """A Bitmessage Protocol Excessive Data Error""" + errorCodes = ("Too much data") + + +class BMProto(AdvancedDispatcher, ObjectTracker): + """A parser for the Bitmessage Protocol""" + # pylint: disable=too-many-instance-attributes, too-many-public-methods + timeOffsetWrongCount = 0 + + def __init__(self, address=None, sock=None): + # pylint: disable=unused-argument, super-init-not-called + AdvancedDispatcher.__init__(self, sock) + self.isOutbound = False + # packet/connection from a local IP + 
self.local = False + self.pendingUpload = RandomTrackingDict() + # canonical identifier of network group + self.network_group = None + # userAgent initialization + self.userAgent = '' + + def bm_proto_reset(self): + """Reset the bitmessage object parser""" + self.magic = None + self.command = None + self.payloadLength = 0 + self.checksum = None + self.payload = None + self.invalid = False + self.payloadOffset = 0 + self.expectBytes = protocol.Header.size + self.object = None + + def state_bm_header(self): + """Process incoming header""" + self.magic, self.command, self.payloadLength, self.checksum = \ + protocol.Header.unpack(self.read_buf[:protocol.Header.size]) + self.command = self.command.rstrip('\x00') + if self.magic != 0xE9BEB4D9: + # skip 1 byte in order to sync + self.set_state("bm_header", length=1) + self.bm_proto_reset() + logger.debug('Bad magic') + if self.socket.type == socket.SOCK_STREAM: + self.close_reason = "Bad magic" + self.set_state("close") + return False + if self.payloadLength > MAX_MESSAGE_SIZE: + self.invalid = True + self.set_state( + "bm_command", + length=protocol.Header.size, expectBytes=self.payloadLength) + return True + + def state_bm_command(self): # pylint: disable=too-many-branches + """Process incoming command""" + self.payload = self.read_buf[:self.payloadLength] + if self.checksum != hashlib.sha512(self.payload).digest()[0:4]: + logger.debug('Bad checksum, ignoring') + self.invalid = True + retval = True + if not self.fullyEstablished and self.command not in ( + "error", "version", "verack"): + logger.error( + 'Received command %s before connection was fully' + ' established, ignoring', self.command) + self.invalid = True + if not self.invalid: + try: + retval = getattr( + self, "bm_command_" + str(self.command).lower())() + except AttributeError: + # unimplemented command + logger.debug('unimplemented command %s', self.command) + except BMProtoInsufficientDataError: + logger.debug('packet length too short, skipping') + 
except BMProtoExcessiveDataError: + logger.debug('too much data, skipping') + except BMObjectInsufficientPOWError: + logger.debug('insufficient PoW, skipping') + except BMObjectInvalidDataError: + logger.debug('object invalid data, skipping') + except BMObjectExpiredError: + logger.debug('object expired, skipping') + except BMObjectUnwantedStreamError: + logger.debug('object not in wanted stream, skipping') + except BMObjectInvalidError: + logger.debug('object invalid, skipping') + except BMObjectAlreadyHaveError: + logger.debug( + '%(host)s:%(port)i already got object, skipping', + self.destination._asdict()) + except struct.error: + logger.debug('decoding error, skipping') + elif self.socket.type == socket.SOCK_DGRAM: + # broken read, ignore + pass + else: + logger.debug('Closing due to invalid command %s', self.command) + self.close_reason = "Invalid command %s" % self.command + self.set_state("close") + return False + if retval: + self.set_state("bm_header", length=self.payloadLength) + self.bm_proto_reset() + # else assume the command requires a different state to follow + return True + + def decode_payload_string(self, length): + """Read and return `length` bytes from payload""" + value = self.payload[self.payloadOffset:self.payloadOffset + length] + self.payloadOffset += length + return value + + def decode_payload_varint(self): + """Decode a varint from the payload""" + value, offset = addresses.decodeVarint( + self.payload[self.payloadOffset:]) + self.payloadOffset += offset + return value + + def decode_payload_node(self): + """Decode node details from the payload""" + # protocol.checkIPAddress() + services, host, port = self.decode_payload_content("Q16sH") + if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF': + host = socket.inet_ntop(socket.AF_INET, str(host[12:16])) + elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43': + # Onion, based on BMD/bitcoind + host = base64.b32encode(host[6:]).lower() + ".onion" + else: + host = 
socket.inet_ntop(socket.AF_INET6, str(host)) + if host == "": + # This can happen on Windows systems which are not 64-bit + # compatible so let us drop the IPv6 address. + host = socket.inet_ntop(socket.AF_INET, str(host[12:16])) + + return Node(services, host, port) + + # pylint: disable=too-many-branches,too-many-statements + def decode_payload_content(self, pattern="v"): + """ + Decode the payload depending on pattern: + + L = varint indicating the length of the next array + l = varint indicating the length of the next item + v = varint (or array) + H = uint16 + I = uint32 + Q = uint64 + i = net_addr (without time and stream number) + s = string + 0-9 = length of the next item + , = end of array + """ + + def decode_simple(self, char="v"): + """Decode the payload using one char pattern""" + if char == "v": + return self.decode_payload_varint() + if char == "i": + return self.decode_payload_node() + if char == "H": + self.payloadOffset += 2 + return struct.unpack(">H", self.payload[ + self.payloadOffset - 2:self.payloadOffset])[0] + if char == "I": + self.payloadOffset += 4 + return struct.unpack(">I", self.payload[ + self.payloadOffset - 4:self.payloadOffset])[0] + if char == "Q": + self.payloadOffset += 8 + return struct.unpack(">Q", self.payload[ + self.payloadOffset - 8:self.payloadOffset])[0] + return None + + size = None + isArray = False + + # size + # iterator starting from size counting to 0 + # isArray? 
+ # subpattern + # position of parser in subpattern + # retval (array) + parserStack = [[1, 1, False, pattern, 0, []]] + + while True: + i = parserStack[-1][3][parserStack[-1][4]] + if i in "0123456789" and ( + size is None or parserStack[-1][3][parserStack[-1][4] - 1] + not in "lL"): + try: + size = size * 10 + int(i) + except TypeError: + size = int(i) + isArray = False + elif i in "Ll" and size is None: + size = self.decode_payload_varint() + isArray = i == "L" + elif size is not None: + if isArray: + parserStack.append([ + size, size, isArray, + parserStack[-1][3][parserStack[-1][4]:], 0, [] + ]) + parserStack[-2][4] = len(parserStack[-2][3]) + else: + j = 0 + for j in range( + parserStack[-1][4], len(parserStack[-1][3])): + if parserStack[-1][3][j] not in "lL0123456789": + break + parserStack.append([ + size, size, isArray, + parserStack[-1][3][parserStack[-1][4]:j + 1], 0, [] + ]) + parserStack[-2][4] += len(parserStack[-1][3]) - 1 + size = None + continue + elif i == "s": + # if parserStack[-2][2]: + # parserStack[-1][5].append(self.payload[ + # self.payloadOffset:self.payloadOffset + # + parserStack[-1][0]]) + # else: + parserStack[-1][5] = self.payload[ + self.payloadOffset:self.payloadOffset + parserStack[-1][0]] + self.payloadOffset += parserStack[-1][0] + parserStack[-1][1] = 0 + parserStack[-1][2] = True + # del parserStack[-1] + size = None + elif i in "viHIQ": + parserStack[-1][5].append(decode_simple( + self, parserStack[-1][3][parserStack[-1][4]])) + size = None + else: + size = None + for depth in range(len(parserStack) - 1, -1, -1): + parserStack[depth][4] += 1 + if parserStack[depth][4] >= len(parserStack[depth][3]): + parserStack[depth][1] -= 1 + parserStack[depth][4] = 0 + if depth > 0: + if parserStack[depth][2]: + parserStack[depth - 1][5].append( + parserStack[depth][5]) + else: + parserStack[depth - 1][5].extend( + parserStack[depth][5]) + parserStack[depth][5] = [] + if parserStack[depth][1] <= 0: + if depth == 0: + # we're done, at depth 
0 counter is at 0 + # and pattern is done parsing + return parserStack[depth][5] + del parserStack[-1] + continue + break + break + if self.payloadOffset > self.payloadLength: + logger.debug( + 'Insufficient data %i/%i', + self.payloadOffset, self.payloadLength) + raise BMProtoInsufficientDataError() + + def bm_command_error(self): + """Decode an error message and log it""" + err_values = self.decode_payload_content("vvlsls") + fatalStatus = err_values[0] + # banTime = err_values[1] + # inventoryVector = err_values[2] + errorText = err_values[3] + logger.error( + '%s:%i error: %i, %s', self.destination.host, + self.destination.port, fatalStatus, errorText) + return True + + def bm_command_getdata(self): + """ + Incoming request for object(s). + If we have them and some other conditions are fulfilled, + append them to the write queue. + """ + items = self.decode_payload_content("l32s") + # skip? + now = time.time() + if now < self.skipUntil: + return True + for i in items: + self.pendingUpload[str(i)] = now + return True + + def _command_inv(self, dandelion=False): + """ + Common inv announce implementation: + both inv and dinv depending on *dandelion* kwarg + """ + items = self.decode_payload_content("l32s") + + if len(items) > MAX_OBJECT_COUNT: + logger.error( + 'Too many items in %sinv message!', 'd' if dandelion else '') + raise BMProtoExcessiveDataError() + + # ignore dinv if dandelion turned off + if dandelion and not state.dandelion: + return True + + for i in map(str, items): + if i in Inventory() and not Dandelion().hasHash(i): + continue + if dandelion and not Dandelion().hasHash(i): + Dandelion().addHash(i, self) + self.handleReceivedInventory(i) + + return True + + def bm_command_inv(self): + """Non-dandelion announce""" + return self._command_inv(False) + + def bm_command_dinv(self): + """Dandelion stem announce""" + return self._command_inv(True) + + def bm_command_object(self): + """Incoming object, process it""" + objectOffset = self.payloadOffset + 
nonce, expiresTime, objectType, version, streamNumber = \ + self.decode_payload_content("QQIvv") + self.object = BMObject( + nonce, expiresTime, objectType, version, streamNumber, + self.payload, self.payloadOffset) + + payload_len = len(self.payload) - self.payloadOffset + if payload_len > MAX_OBJECT_PAYLOAD_SIZE: + logger.info( + 'The payload length of this object is too large' + ' (%d bytes). Ignoring it.', payload_len) + raise BMProtoExcessiveDataError() + + try: + self.object.checkProofOfWorkSufficient() + self.object.checkEOLSanity() + self.object.checkAlreadyHave() + except (BMObjectExpiredError, BMObjectAlreadyHaveError, + BMObjectInsufficientPOWError): + BMProto.stopDownloadingObject(self.object.inventoryHash) + raise + try: + self.object.checkStream() + except BMObjectUnwantedStreamError: + acceptmismatch = BMConfigParser().get( + "inventory", "acceptmismatch") + BMProto.stopDownloadingObject( + self.object.inventoryHash, acceptmismatch) + if not acceptmismatch: + raise + + try: + self.object.checkObjectByType() + objectProcessorQueue.put(( + self.object.objectType, buffer(self.object.data))) + except BMObjectInvalidError: + BMProto.stopDownloadingObject(self.object.inventoryHash, True) + else: + try: + del missingObjects[self.object.inventoryHash] + except KeyError: + pass + + if self.object.inventoryHash in Inventory() and Dandelion().hasHash( + self.object.inventoryHash): + Dandelion().removeHash( + self.object.inventoryHash, "cycle detection") + + Inventory()[self.object.inventoryHash] = ( + self.object.objectType, self.object.streamNumber, + buffer(self.payload[objectOffset:]), self.object.expiresTime, + buffer(self.object.tag) + ) + self.handleReceivedObject( + self.object.streamNumber, self.object.inventoryHash) + invQueue.put(( + self.object.streamNumber, self.object.inventoryHash, + self.destination)) + return True + + def _decode_addr(self): + return self.decode_payload_content("LQIQ16sH") + + def bm_command_addr(self): + """Incoming addresses, 
process them""" + # not using services + for seenTime, stream, _, ip, port in self._decode_addr(): + ip = str(ip) + if ( + stream not in state.streamsInWhichIAmParticipating + # FIXME: should check against complete list + or ip.startswith('bootstrap') + ): + continue + decodedIP = protocol.checkIPAddress(ip) + if ( + decodedIP and time.time() - seenTime > 0 + and seenTime > time.time() - ADDRESS_ALIVE + and port > 0 + ): + peer = Peer(decodedIP, port) + + with knownnodes.knownNodesLock: + # isnew = + knownnodes.addKnownNode(stream, peer, seenTime) + + # since we don't track peers outside of knownnodes, + # only spread if in knownnodes to prevent flood + # DISABLED TO WORKAROUND FLOOD/LEAK + # if isnew: + # addrQueue.put(( + # stream, peer, seenTime, self.destination)) + return True + + def bm_command_portcheck(self): + """Incoming port check request, queue it.""" + portCheckerQueue.put(Peer(self.destination, self.peerNode.port)) + return True + + def bm_command_ping(self): + """Incoming ping, respond to it.""" + self.append_write_buf(protocol.CreatePacket('pong')) + return True + + @staticmethod + def bm_command_pong(): + """ + Incoming pong. + Ignore it. PyBitmessage pings connections after about 5 minutes + of inactivity, and leaves it to the TCP stack to handle actual + timeouts. So there is no need to do anything when a pong arrives. + """ + # nothing really + return True + + def bm_command_verack(self): + """ + Incoming verack. + If already sent my own verack, handshake is complete (except + potentially waiting for buffers to flush), so we can continue + to the main connection phase. If not sent verack yet, + continue processing. + """ + self.verackReceived = True + if not self.verackSent: + return True + self.set_state( + "tls_init" if self.isSSL else "connection_fully_established", + length=self.payloadLength, expectBytes=0) + return False + + def bm_command_version(self): + """ + Incoming version. 
+ Parse and log, remember important things, like streams, bitfields, etc. + """ + decoded = self.decode_payload_content("IQQiiQlslv") + (self.remoteProtocolVersion, self.services, self.timestamp, + self.sockNode, self.peerNode, self.nonce, self.userAgent + ) = decoded[:7] + self.streams = decoded[7:] + self.nonce = struct.pack('>Q', self.nonce) + self.timeOffset = self.timestamp - int(time.time()) + logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion) + logger.debug('services: 0x%08X', self.services) + logger.debug('time offset: %i', self.timeOffset) + logger.debug('my external IP: %s', self.sockNode.host) + logger.debug( + 'remote node incoming address: %s:%i', + self.destination.host, self.peerNode.port) + logger.debug('user agent: %s', self.userAgent) + logger.debug('streams: [%s]', ','.join(map(str, self.streams))) + if not self.peerValidityChecks(): + # ABORT afterwards + return True + self.append_write_buf(protocol.CreatePacket('verack')) + self.verackSent = True + ua_valid = re.match( + r'^/[a-zA-Z]+:[0-9]+\.?[\w\s\(\)\./:;-]*/$', self.userAgent) + if not ua_valid: + self.userAgent = '/INVALID:0/' + if not self.isOutbound: + self.append_write_buf(protocol.assembleVersionMessage( + self.destination.host, self.destination.port, + connectionpool.BMConnectionPool().streams, True, + nodeid=self.nodeid)) + logger.debug( + '%(host)s:%(port)i sending version', + self.destination._asdict()) + if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) + and protocol.haveSSL(not self.isOutbound)): + self.isSSL = True + if not self.verackReceived: + return True + self.set_state( + "tls_init" if self.isSSL else "connection_fully_established", + length=self.payloadLength, expectBytes=0) + return False + + # pylint: disable=too-many-return-statements + def peerValidityChecks(self): + """Check the validity of the peer""" + if self.remoteProtocolVersion < 3: + self.append_write_buf(protocol.assembleErrorMessage( + errorText="Your is using an old protocol. 
Closing connection.", + fatal=2)) + logger.debug( + 'Closing connection to old protocol version %s, node: %s', + self.remoteProtocolVersion, self.destination) + return False + if self.timeOffset > MAX_TIME_OFFSET: + self.append_write_buf(protocol.assembleErrorMessage( + errorText="Your time is too far in the future" + " compared to mine. Closing connection.", fatal=2)) + logger.info( + "%s's time is too far in the future (%s seconds)." + " Closing connection to it.", + self.destination, self.timeOffset) + BMProto.timeOffsetWrongCount += 1 + return False + elif self.timeOffset < -MAX_TIME_OFFSET: + self.append_write_buf(protocol.assembleErrorMessage( + errorText="Your time is too far in the past compared to mine." + " Closing connection.", fatal=2)) + logger.info( + "%s's time is too far in the past" + " (timeOffset %s seconds). Closing connection to it.", + self.destination, self.timeOffset) + BMProto.timeOffsetWrongCount += 1 + return False + else: + BMProto.timeOffsetWrongCount = 0 + if not self.streams: + self.append_write_buf(protocol.assembleErrorMessage( + errorText="We don't have shared stream interests." + " Closing connection.", fatal=2)) + logger.debug( + 'Closed connection to %s because there is no overlapping' + ' interest in streams.', self.destination) + return False + if connectionpool.BMConnectionPool().inboundConnections.get( + self.destination): + try: + if not protocol.checkSocksIP(self.destination.host): + self.append_write_buf(protocol.assembleErrorMessage( + errorText="Too many connections from your IP." 
+ " Closing connection.", fatal=2)) + logger.debug( + 'Closed connection to %s because we are already' + ' connected to that IP.', self.destination) + return False + except Exception: # TODO: exception types + pass + if not self.isOutbound: + # incoming from a peer we're connected to as outbound, + # or server full report the same error to counter deanonymisation + if ( + Peer(self.destination.host, self.peerNode.port) + in connectionpool.BMConnectionPool().inboundConnections + or len(connectionpool.BMConnectionPool()) + > BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxtotalconnections') + + BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxbootstrapconnections') + ): + self.append_write_buf(protocol.assembleErrorMessage( + errorText="Server full, please try again later.", fatal=2)) + logger.debug( + 'Closed connection to %s due to server full' + ' or duplicate inbound/outbound.', self.destination) + return False + if connectionpool.BMConnectionPool().isAlreadyConnected(self.nonce): + self.append_write_buf(protocol.assembleErrorMessage( + errorText="I'm connected to myself. 
Closing connection.", + fatal=2)) + logger.debug( + "Closed connection to %s because I'm connected to myself.", + self.destination) + return False + + return True + + @staticmethod + def stopDownloadingObject(hashId, forwardAnyway=False): + """Stop downloading object *hashId*""" + for connection in connectionpool.BMConnectionPool().connections(): + try: + del connection.objectsNewToMe[hashId] + except KeyError: + pass + if not forwardAnyway: + try: + with connection.objectsNewToThemLock: + del connection.objectsNewToThem[hashId] + except KeyError: + pass + try: + del missingObjects[hashId] + except KeyError: + pass + + def handle_close(self): + """Handle close""" + self.set_state("close") + if not (self.accepting or self.connecting or self.connected): + # already disconnected + return + try: + logger.debug( + '%s:%i: closing, %s', self.destination.host, + self.destination.port, self.close_reason) + except AttributeError: + try: + logger.debug( + '%s:%i: closing', + self.destination.host, self.destination.port) + except AttributeError: + logger.debug('Disconnected socket closing') + AdvancedDispatcher.handle_close(self) + + +class BMStringParser(BMProto): + """ + A special case of BMProto used by objectProcessor to send ACK + """ + def __init__(self): + super(BMStringParser, self).__init__() + self.destination = Peer('127.0.0.1', 8444) + self.payload = None + ObjectTracker.__init__(self) + + def send_data(self, data): + """Send object given by the data string""" + # This class is introduced specially for ACK sending, please + # change log strings if you are going to use it for something else + self.bm_proto_reset() + self.payload = data + try: + self.bm_command_object() + except BMObjectAlreadyHaveError: + pass # maybe the same msg received on different nodes + except BMObjectExpiredError: + logger.debug( + 'Sending ACK failure (expired): %s', hexlify(data)) + except Exception as e: + logger.debug( + 'Exception of type %s while sending ACK', + type(e), 
exc_info=True) diff --git a/src/tests/mock/pybitmessage/network/connectionchooser.py b/src/tests/mock/pybitmessage/network/connectionchooser.py new file mode 100644 index 00000000..edac86b7 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/connectionchooser.py @@ -0,0 +1,77 @@ +""" +Select which node to connect to +""" +# pylint: disable=too-many-branches +import logging +import random # nosec + +import knownnodes +from pybitmessage import protocol +from pybitmessage import state +from pybitmessage.bmconfigparser import BMConfigParser +from pybitmessage.queues import queue, portCheckerQueue + +logger = logging.getLogger('default') + + +def getDiscoveredPeer(): + """Get a peer from the local peer discovery list""" + try: + peer = random.choice(state.discoveredPeers.keys()) + except (IndexError, KeyError): + raise ValueError + try: + del state.discoveredPeers[peer] + except KeyError: + pass + return peer + + +def chooseConnection(stream): + """Returns an appropriate connection""" + haveOnion = BMConfigParser().safeGet( + "bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS' + onionOnly = BMConfigParser().safeGetBoolean( + "bitmessagesettings", "onionservicesonly") + try: + retval = portCheckerQueue.get(False) + portCheckerQueue.task_done() + return retval + except queue.Empty: + pass + # with a probability of 0.5, connect to a discovered peer + if random.choice((False, True)) and not haveOnion: + # discovered peers are already filtered by allowed streams + return getDiscoveredPeer() + for _ in range(50): + peer = random.choice(knownnodes.knownNodes[stream].keys()) + try: + peer_info = knownnodes.knownNodes[stream][peer] + if peer_info.get('self'): + continue + rating = peer_info["rating"] + except TypeError: + logger.warning('Error in %s', peer) + rating = 0 + if haveOnion: + # do not connect to raw IP addresses + # --keep all traffic within Tor overlay + if onionOnly and not peer.host.endswith('.onion'): + continue + # onion addresses have a higher priority 
when SOCKS + if peer.host.endswith('.onion') and rating > 0: + rating = 1 + # TODO: need better check + elif not peer.host.startswith('bootstrap'): + encodedAddr = protocol.encodeHost(peer.host) + # don't connect to local IPs when using SOCKS + if not protocol.checkIPAddress(encodedAddr, False): + continue + if rating > 1: + rating = 1 + try: + if 0.05 / (1.0 - rating) > random.random(): + return peer + except ZeroDivisionError: + return peer + raise ValueError diff --git a/src/tests/mock/pybitmessage/network/connectionpool.py b/src/tests/mock/pybitmessage/network/connectionpool.py new file mode 100644 index 00000000..4b67fa3c --- /dev/null +++ b/src/tests/mock/pybitmessage/network/connectionpool.py @@ -0,0 +1,405 @@ +""" +`BMConnectionPool` class definition +""" +import errno +import logging +import re +import socket +import sys +import time + +import asyncore_pollchoose as asyncore +from pybitmessage import helper_random +import knownnodes +from pybitmessage import protocol +from pybitmessage import state +from pybitmessage.bmconfigparser import BMConfigParser +from connectionchooser import chooseConnection +from node import Peer +from proxy import Proxy +from singleton import Singleton +from tcp import ( + bootstrap, Socks4aBMConnection, Socks5BMConnection, + TCPConnection, TCPServer) +from udp import UDPSocket + +logger = logging.getLogger('default') + + +@Singleton +class BMConnectionPool(object): + """Pool of all existing connections""" + # pylint: disable=too-many-instance-attributes + + trustedPeer = None + """ + If the trustedpeer option is specified in keys.dat then this will + contain a Peer which will be connected to instead of using the + addresses advertised by other peers. + + The expected use case is where the user has a trusted server where + they run a Bitmessage daemon permanently. 
If they then run a second + instance of the client on a local machine periodically when they want + to check for messages it will sync with the network a lot faster + without compromising security. + """ + + def __init__(self): + asyncore.set_rates( + BMConfigParser().safeGetInt( + "bitmessagesettings", "maxdownloadrate"), + BMConfigParser().safeGetInt( + "bitmessagesettings", "maxuploadrate") + ) + self.outboundConnections = {} + self.inboundConnections = {} + self.listeningSockets = {} + self.udpSockets = {} + self.streams = [] + self._lastSpawned = 0 + self._spawnWait = 2 + self._bootstrapped = False + + trustedPeer = BMConfigParser().safeGet( + 'bitmessagesettings', 'trustedpeer') + try: + if trustedPeer: + host, port = trustedPeer.split(':') + self.trustedPeer = Peer(host, int(port)) + except ValueError: + sys.exit( + 'Bad trustedpeer config setting! It should be set as' + ' trustedpeer=:' + ) + + def __len__(self): + return len(self.outboundConnections) + len(self.inboundConnections) + + def connections(self): + """ + Shortcut for combined list of connections from + `inboundConnections` and `outboundConnections` dicts + """ + return self.inboundConnections.values() + self.outboundConnections.values() + + def establishedConnections(self): + """Shortcut for list of connections having fullyEstablished == True""" + return [ + x for x in self.connections() if x.fullyEstablished] + + def connectToStream(self, streamNumber): + """Connect to a bitmessage stream""" + self.streams.append(streamNumber) + state.streamsInWhichIAmParticipating.append(streamNumber) + + def getConnectionByAddr(self, addr): + """ + Return an (existing) connection object based on a `Peer` object + (IP and port) + """ + try: + return self.inboundConnections[addr] + except KeyError: + pass + try: + return self.inboundConnections[addr.host] + except (KeyError, AttributeError): + pass + try: + return self.outboundConnections[addr] + except KeyError: + pass + try: + return 
self.udpSockets[addr.host] + except (KeyError, AttributeError): + pass + raise KeyError + + def isAlreadyConnected(self, nodeid): + """Check if we're already connected to this peer""" + for i in self.connections(): + try: + if nodeid == i.nodeid: + return True + except AttributeError: + pass + return False + + def addConnection(self, connection): + """Add a connection object to our internal dict""" + if isinstance(connection, UDPSocket): + return + if connection.isOutbound: + self.outboundConnections[connection.destination] = connection + else: + if connection.destination.host in self.inboundConnections: + self.inboundConnections[connection.destination] = connection + else: + self.inboundConnections[connection.destination.host] = \ + connection + + def removeConnection(self, connection): + """Remove a connection from our internal dict""" + if isinstance(connection, UDPSocket): + del self.udpSockets[connection.listening.host] + elif isinstance(connection, TCPServer): + del self.listeningSockets[Peer( + connection.destination.host, connection.destination.port)] + elif connection.isOutbound: + try: + del self.outboundConnections[connection.destination] + except KeyError: + pass + else: + try: + del self.inboundConnections[connection.destination] + except KeyError: + try: + del self.inboundConnections[connection.destination.host] + except KeyError: + pass + connection.handle_close() + + @staticmethod + def getListeningIP(): + """What IP are we supposed to be listening on?""" + if BMConfigParser().safeGet( + "bitmessagesettings", "onionhostname").endswith(".onion"): + host = BMConfigParser().safeGet( + "bitmessagesettings", "onionbindip") + else: + host = '127.0.0.1' + if ( + BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten") + or BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") + == "none" + ): + # python doesn't like bind + INADDR_ANY? 
+ # host = socket.INADDR_ANY + host = BMConfigParser().get("network", "bind") + return host + + def startListening(self, bind=None): + """Open a listening socket and start accepting connections on it""" + if bind is None: + bind = self.getListeningIP() + port = BMConfigParser().safeGetInt("bitmessagesettings", "port") + # correct port even if it changed + ls = TCPServer(host=bind, port=port) + self.listeningSockets[ls.destination] = ls + + def startUDPSocket(self, bind=None): + """ + Open an UDP socket. Depending on settings, it can either only + accept incoming UDP packets, or also be able to send them. + """ + if bind is None: + host = self.getListeningIP() + udpSocket = UDPSocket(host=host, announcing=True) + else: + if bind is False: + udpSocket = UDPSocket(announcing=False) + else: + udpSocket = UDPSocket(host=bind, announcing=True) + self.udpSockets[udpSocket.listening.host] = udpSocket + + def startBootstrappers(self): + """Run the process of resolving bootstrap hostnames""" + proxy_type = BMConfigParser().safeGet( + 'bitmessagesettings', 'socksproxytype') + # A plugins may be added here + hostname = None + if not proxy_type or proxy_type == 'none': + connection_base = TCPConnection + elif proxy_type == 'SOCKS5': + connection_base = Socks5BMConnection + hostname = helper_random.randomchoice([ + 'quzwelsuziwqgpt2.onion', None + ]) + elif proxy_type == 'SOCKS4a': + connection_base = Socks4aBMConnection # FIXME: I cannot test + else: + # This should never happen because socksproxytype setting + # is handled in bitmessagemain before starting the connectionpool + return + + bootstrapper = bootstrap(connection_base) + if not hostname: + port = helper_random.randomchoice([8080, 8444]) + hostname = 'bootstrap%s.bitmessage.org' % port + else: + port = 8444 + self.addConnection(bootstrapper(hostname, port)) + + def loop(self): # pylint: disable=too-many-branches,too-many-statements + """Main Connectionpool's loop""" + # pylint: disable=too-many-locals + # defaults to 
empty loop if outbound connections are maxed + spawnConnections = False + acceptConnections = True + if BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'dontconnect'): + acceptConnections = False + elif BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'sendoutgoingconnections'): + spawnConnections = True + socksproxytype = BMConfigParser().safeGet( + 'bitmessagesettings', 'socksproxytype', '') + onionsocksproxytype = BMConfigParser().safeGet( + 'bitmessagesettings', 'onionsocksproxytype', '') + if ( + socksproxytype[:5] == 'SOCKS' + and not BMConfigParser().safeGetBoolean( + 'bitmessagesettings', 'sockslisten') + and '.onion' not in BMConfigParser().safeGet( + 'bitmessagesettings', 'onionhostname', '') + ): + acceptConnections = False + + # pylint: disable=too-many-nested-blocks + if spawnConnections: + if not knownnodes.knownNodesActual: + self.startBootstrappers() + knownnodes.knownNodesActual = True + if not self._bootstrapped: + self._bootstrapped = True + Proxy.proxy = ( + BMConfigParser().safeGet( + 'bitmessagesettings', 'sockshostname'), + BMConfigParser().safeGetInt( + 'bitmessagesettings', 'socksport') + ) + # TODO AUTH + # TODO reset based on GUI settings changes + try: + if not onionsocksproxytype.startswith("SOCKS"): + raise ValueError + Proxy.onion_proxy = ( + BMConfigParser().safeGet( + 'network', 'onionsockshostname', None), + BMConfigParser().safeGet( + 'network', 'onionsocksport', None) + ) + except ValueError: + Proxy.onion_proxy = None + established = sum( + 1 for c in self.outboundConnections.values() + if (c.connected and c.fullyEstablished)) + pending = len(self.outboundConnections) - established + if established < BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxoutboundconnections'): + for i in range( + state.maximumNumberOfHalfOpenConnections - pending): + try: + chosen = self.trustedPeer or chooseConnection( + helper_random.randomchoice(self.streams)) + except ValueError: + continue + if chosen in 
self.outboundConnections: + continue + if chosen.host in self.inboundConnections: + continue + # don't connect to self + if chosen in state.ownAddresses: + continue + # don't connect to the hosts from the same + # network group, defense against sibyl attacks + host_network_group = protocol.network_group( + chosen.host) + same_group = False + for j in self.outboundConnections.values(): + if host_network_group == j.network_group: + same_group = True + if chosen.host == j.destination.host: + knownnodes.decreaseRating(chosen) + break + if same_group: + continue + + try: + if chosen.host.endswith(".onion") and Proxy.onion_proxy: + if onionsocksproxytype == "SOCKS5": + self.addConnection(Socks5BMConnection(chosen)) + elif onionsocksproxytype == "SOCKS4a": + self.addConnection(Socks4aBMConnection(chosen)) + elif socksproxytype == "SOCKS5": + self.addConnection(Socks5BMConnection(chosen)) + elif socksproxytype == "SOCKS4a": + self.addConnection(Socks4aBMConnection(chosen)) + else: + self.addConnection(TCPConnection(chosen)) + except socket.error as e: + if e.errno == errno.ENETUNREACH: + continue + + self._lastSpawned = time.time() + else: + for i in self.connections(): + # FIXME: rating will be increased after next connection + i.handle_close() + + if acceptConnections: + if not self.listeningSockets: + if BMConfigParser().safeGet('network', 'bind') == '': + self.startListening() + else: + for bind in re.sub( + r'[^\w.]+', ' ', + BMConfigParser().safeGet('network', 'bind') + ).split(): + self.startListening(bind) + logger.info('Listening for incoming connections.') + if not self.udpSockets: + if BMConfigParser().safeGet('network', 'bind') == '': + self.startUDPSocket() + else: + for bind in re.sub( + r'[^\w.]+', ' ', + BMConfigParser().safeGet('network', 'bind') + ).split(): + self.startUDPSocket(bind) + self.startUDPSocket(False) + logger.info('Starting UDP socket(s).') + else: + if self.listeningSockets: + for i in self.listeningSockets.values(): + i.close_reason = 
"Stopping listening" + i.accepting = i.connecting = i.connected = False + logger.info('Stopped listening for incoming connections.') + if self.udpSockets: + for i in self.udpSockets.values(): + i.close_reason = "Stopping UDP socket" + i.accepting = i.connecting = i.connected = False + logger.info('Stopped udp sockets.') + + loopTime = float(self._spawnWait) + if self._lastSpawned < time.time() - self._spawnWait: + loopTime = 2.0 + asyncore.loop(timeout=loopTime, count=1000) + + reaper = [] + for i in self.connections(): + minTx = time.time() - 20 + if i.fullyEstablished: + minTx -= 300 - 20 + if i.lastTx < minTx: + if i.fullyEstablished: + i.append_write_buf(protocol.CreatePacket('ping')) + else: + i.close_reason = "Timeout (%is)" % ( + time.time() - i.lastTx) + i.set_state("close") + for i in ( + self.connections() + + self.listeningSockets.values() + self.udpSockets.values() + ): + if not (i.accepting or i.connecting or i.connected): + reaper.append(i) + else: + try: + if i.state == "close": + reaper.append(i) + except AttributeError: + pass + for i in reaper: + self.removeConnection(i) diff --git a/src/tests/mock/pybitmessage/network/constants.py b/src/tests/mock/pybitmessage/network/constants.py new file mode 100644 index 00000000..f8f4120f --- /dev/null +++ b/src/tests/mock/pybitmessage/network/constants.py @@ -0,0 +1,17 @@ +""" +Network protocol constants +""" + + +#: address is online if online less than this many seconds ago +ADDRESS_ALIVE = 10800 +#: protocol specification says max 1000 addresses in one addr command +MAX_ADDR_COUNT = 1000 +#: ~1.6 MB which is the maximum possible size of an inv message. 
+MAX_MESSAGE_SIZE = 1600100 +#: 2**18 = 256kB is the maximum size of an object payload +MAX_OBJECT_PAYLOAD_SIZE = 2**18 +#: protocol specification says max 50000 objects in one inv command +MAX_OBJECT_COUNT = 50000 +#: maximum time offset +MAX_TIME_OFFSET = 3600 diff --git a/src/tests/mock/pybitmessage/network/dandelion.py b/src/tests/mock/pybitmessage/network/dandelion.py new file mode 100644 index 00000000..03f45bd7 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/dandelion.py @@ -0,0 +1,196 @@ +""" +Dandelion class definition, tracks stages +""" +import logging +from collections import namedtuple +from random import choice, expovariate, sample +from threading import RLock +from time import time + +import connectionpool +import state +from queues import invQueue +from singleton import Singleton + +# randomise routes after 600 seconds +REASSIGN_INTERVAL = 600 + +# trigger fluff due to expiration +FLUFF_TRIGGER_FIXED_DELAY = 10 +FLUFF_TRIGGER_MEAN_DELAY = 30 + +MAX_STEMS = 2 + +Stem = namedtuple('Stem', ['child', 'stream', 'timeout']) + +logger = logging.getLogger('default') + + +@Singleton +class Dandelion: # pylint: disable=old-style-class + """Dandelion class for tracking stem/fluff stages.""" + def __init__(self): + # currently assignable child stems + self.stem = [] + # currently assigned parent <-> child mappings + self.nodeMap = {} + # currently existing objects in stem mode + self.hashMap = {} + # when to rerandomise routes + self.refresh = time() + REASSIGN_INTERVAL + self.lock = RLock() + + @staticmethod + def poissonTimeout(start=None, average=0): + """Generate deadline using Poisson distribution""" + if start is None: + start = time() + if average == 0: + average = FLUFF_TRIGGER_MEAN_DELAY + return start + expovariate(1.0 / average) + FLUFF_TRIGGER_FIXED_DELAY + + def addHash(self, hashId, source=None, stream=1): + """Add inventory vector to dandelion stem""" + if not state.dandelion: + return + with self.lock: + self.hashMap[hashId] = Stem( + 
self.getNodeStem(source),
                stream,
                self.poissonTimeout())

    def setHashStream(self, hashId, stream=1):
        """
        Update stream for inventory vector (as inv/dinv commands don't
        include streams, we only learn this after receiving the object)
        """
        with self.lock:
            if hashId in self.hashMap:
                # keep the existing child mapping, refresh stream and deadline
                self.hashMap[hashId] = Stem(
                    self.hashMap[hashId].child,
                    stream,
                    self.poissonTimeout())

    def removeHash(self, hashId, reason="no reason specified"):
        """Switch inventory vector from stem to fluff mode"""
        if logger.isEnabledFor(logging.DEBUG):
            # hashId is a Python 2 byte string; render it as hex for the log
            logger.debug(
                '%s entering fluff mode due to %s.',
                ''.join('%02x' % ord(i) for i in hashId), reason)
        with self.lock:
            try:
                del self.hashMap[hashId]
            except KeyError:
                # already removed (e.g. expired concurrently) - nothing to do
                pass

    def hasHash(self, hashId):
        """Is inventory vector in stem mode?"""
        return hashId in self.hashMap

    def objectChildStem(self, hashId):
        """Child (i.e. next) node for an inventory vector during stem mode"""
        return self.hashMap[hashId].child

    def maybeAddStem(self, connection):
        """
        If we had too few outbound connections, add the current one to the
        current stem list. Dandelion as designed by the authors should
        always have two active stem child connections.
        """
        # fewer than MAX_STEMS outbound connections at last reshuffle?
        with self.lock:
            if len(self.stem) < MAX_STEMS:
                self.stem.append(connection)
                # attach any parentless parent->child mappings to this node
                for k in (k for k, v in self.nodeMap.iteritems() if v is None):
                    self.nodeMap[k] = connection
                # re-home any orphaned stem objects onto the new child
                for k, v in {
                        k: v for k, v in self.hashMap.iteritems()
                        if v.child is None
                }.iteritems():
                    self.hashMap[k] = Stem(
                        connection, v.stream, self.poissonTimeout())
                    # NOTE(review): v.child is still None here, so this inv is
                    # re-queued as locally generated - confirm intended
                    invQueue.put((v.stream, k, v.child))

    def maybeRemoveStem(self, connection):
        """
        Remove current connection from the stem list (called e.g. when
        a connection is closed).
        """
        # is the stem active? 
+ with self.lock: + if connection in self.stem: + self.stem.remove(connection) + # active mappings to pointing to the removed node + for k in ( + k for k, v in self.nodeMap.iteritems() + if v == connection + ): + self.nodeMap[k] = None + for k, v in { + k: v for k, v in self.hashMap.iteritems() + if v.child == connection + }.iteritems(): + self.hashMap[k] = Stem( + None, v.stream, self.poissonTimeout()) + + def pickStem(self, parent=None): + """ + Pick a random active stem, but not the parent one + (the one where an object came from) + """ + try: + # pick a random from available stems + stem = choice(range(len(self.stem))) + if self.stem[stem] == parent: + # one stem available and it's the parent + if len(self.stem) == 1: + return None + # else, pick the other one + return self.stem[1 - stem] + # all ok + return self.stem[stem] + except IndexError: + # no stems available + return None + + def getNodeStem(self, node=None): + """ + Return child stem node for a given parent stem node + (the mapping is static for about 10 minutes, then it reshuffles) + """ + with self.lock: + try: + return self.nodeMap[node] + except KeyError: + self.nodeMap[node] = self.pickStem(node) + return self.nodeMap[node] + + def expire(self): + """Switch expired objects from stem to fluff mode""" + with self.lock: + deadline = time() + toDelete = [ + [v.stream, k, v.child] for k, v in self.hashMap.iteritems() + if v.timeout < deadline + ] + + for row in toDelete: + self.removeHash(row[1], 'expiration') + invQueue.put(row) + return toDelete + + def reRandomiseStems(self): + """Re-shuffle stem mapping (parent <-> child pairs)""" + with self.lock: + try: + # random two connections + self.stem = sample( + connectionpool.BMConnectionPool( + ).outboundConnections.values(), MAX_STEMS) + # not enough stems available + except ValueError: + self.stem = connectionpool.BMConnectionPool( + ).outboundConnections.values() + self.nodeMap = {} + # hashMap stays to cater for pending stems + self.refresh = 
time() + REASSIGN_INTERVAL diff --git a/src/tests/mock/pybitmessage/network/downloadthread.py b/src/tests/mock/pybitmessage/network/downloadthread.py new file mode 100644 index 00000000..0ae83b5b --- /dev/null +++ b/src/tests/mock/pybitmessage/network/downloadthread.py @@ -0,0 +1,84 @@ +""" +`DownloadThread` class definition +""" +import time + +import addresses +import helper_random +import protocol +from dandelion import Dandelion +from inventory import Inventory +from network.connectionpool import BMConnectionPool +from objectracker import missingObjects +from threads import StoppableThread + + +class DownloadThread(StoppableThread): + """Thread-based class for downloading from connections""" + minPending = 200 + maxRequestChunk = 1000 + requestTimeout = 60 + cleanInterval = 60 + requestExpires = 3600 + + def __init__(self): + super(DownloadThread, self).__init__(name="Downloader") + self.lastCleaned = time.time() + + def cleanPending(self): + """Expire pending downloads eventually""" + deadline = time.time() - self.requestExpires + try: + toDelete = [ + k for k, v in missingObjects.iteritems() + if v < deadline] + except RuntimeError: + pass + else: + for i in toDelete: + del missingObjects[i] + self.lastCleaned = time.time() + + def run(self): + while not self._stopped: + requested = 0 + # Choose downloading peers randomly + connections = BMConnectionPool().establishedConnections() + helper_random.randomshuffle(connections) + requestChunk = max(int( + min(self.maxRequestChunk, len(missingObjects)) + / len(connections)), 1) if connections else 1 + + for i in connections: + now = time.time() + # avoid unnecessary delay + if i.skipUntil >= now: + continue + try: + request = i.objectsNewToMe.randomKeys(requestChunk) + except KeyError: + continue + payload = bytearray() + chunkCount = 0 + for chunk in request: + if chunk in Inventory() and not Dandelion().hasHash(chunk): + try: + del i.objectsNewToMe[chunk] + except KeyError: + pass + continue + 
payload.extend(chunk) + chunkCount += 1 + missingObjects[chunk] = now + if not chunkCount: + continue + payload[0:0] = addresses.encodeVarint(chunkCount) + i.append_write_buf(protocol.CreatePacket('getdata', payload)) + self.logger.debug( + '%s:%i Requesting %i objects', + i.destination.host, i.destination.port, chunkCount) + requested += chunkCount + if time.time() >= self.lastCleaned + self.cleanInterval: + self.cleanPending() + if not requested: + self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/network/http.py b/src/tests/mock/pybitmessage/network/http.py new file mode 100644 index 00000000..d7a938fa --- /dev/null +++ b/src/tests/mock/pybitmessage/network/http.py @@ -0,0 +1,89 @@ +import socket + +from advanceddispatcher import AdvancedDispatcher +import asyncore_pollchoose as asyncore +from proxy import ProxyError +from socks5 import Socks5Connection, Socks5Resolver +from socks4a import Socks4aConnection, Socks4aResolver + + +class HttpError(ProxyError): + pass + + +class HttpConnection(AdvancedDispatcher): + def __init__(self, host, path="/"): # pylint: disable=redefined-outer-name + AdvancedDispatcher.__init__(self) + self.path = path + self.destination = (host, 80) + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.connect(self.destination) + print("connecting in background to %s:%i" % self.destination) + + def state_init(self): + self.append_write_buf( + "GET %s HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n" % ( + self.path, self.destination[0])) + print("Sending %ib" % len(self.write_buf)) + self.set_state("http_request_sent", 0) + return False + + def state_http_request_sent(self): + if self.read_buf: + print("Received %ib" % len(self.read_buf)) + self.read_buf = b"" + if not self.connected: + self.set_state("close", 0) + return False + + +class Socks5HttpConnection(Socks5Connection, HttpConnection): + def __init__(self, host, path="/"): # pylint: disable=super-init-not-called, redefined-outer-name + self.path = path + 
Socks5Connection.__init__(self, address=(host, 80)) + + def state_socks_handshake_done(self): + HttpConnection.state_init(self) + return False + + +class Socks4aHttpConnection(Socks4aConnection, HttpConnection): + def __init__(self, host, path="/"): # pylint: disable=super-init-not-called, redefined-outer-name + Socks4aConnection.__init__(self, address=(host, 80)) + self.path = path + + def state_socks_handshake_done(self): + HttpConnection.state_init(self) + return False + + +if __name__ == "__main__": + # initial fill + for host in ("bootstrap8080.bitmessage.org", "bootstrap8444.bitmessage.org"): + proxy = Socks5Resolver(host=host) + while asyncore.socket_map: + print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map))) + asyncore.loop(timeout=1, count=1) + proxy.resolved() + + proxy = Socks4aResolver(host=host) + while asyncore.socket_map: + print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map))) + asyncore.loop(timeout=1, count=1) + proxy.resolved() + + for host in ("bitmessage.org",): + direct = HttpConnection(host) + while asyncore.socket_map: + # print "loop, state = %s" % (direct.state) + asyncore.loop(timeout=1, count=1) + + proxy = Socks5HttpConnection(host) + while asyncore.socket_map: + # print "loop, state = %s" % (proxy.state) + asyncore.loop(timeout=1, count=1) + + proxy = Socks4aHttpConnection(host) + while asyncore.socket_map: + # print "loop, state = %s" % (proxy.state) + asyncore.loop(timeout=1, count=1) diff --git a/src/tests/mock/pybitmessage/network/httpd.py b/src/tests/mock/pybitmessage/network/httpd.py new file mode 100644 index 00000000..b69ffa99 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/httpd.py @@ -0,0 +1,161 @@ +""" +src/network/httpd.py +======================= +""" +import asyncore +import socket + +from tls import TLSHandshake + + +class HTTPRequestHandler(asyncore.dispatcher): + """Handling HTTP request""" + response = """HTTP/1.0 200 OK\r + Date: Sun, 23 Oct 2016 18:02:00 GMT\r + Content-Type: 
text/html; charset=UTF-8\r + Content-Encoding: UTF-8\r + Content-Length: 136\r + Last-Modified: Wed, 08 Jan 2003 23:11:55 GMT\r + Server: Apache/1.3.3.7 (Unix) (Red-Hat/Linux)\r + ETag: "3f80f-1b6-3e1cb03b"\r + Accept-Ranges: bytes\r + Connection: close\r + \r + + + An Example Page + + + Hello World, this is a very simple HTML document. + + """ + + def __init__(self, sock): + if not hasattr(self, '_map'): + asyncore.dispatcher.__init__(self, sock) + self.inbuf = "" + self.ready = True + self.busy = False + self.respos = 0 + + def handle_close(self): + self.close() + + def readable(self): + return self.ready + + def writable(self): + return self.busy + + def handle_read(self): + self.inbuf += self.recv(8192) + if self.inbuf[-4:] == "\r\n\r\n": + self.busy = True + self.ready = False + self.inbuf = "" + elif self.inbuf == "": + pass + + def handle_write(self): + if self.busy and self.respos < len(HTTPRequestHandler.response): + written = 0 + written = self.send(HTTPRequestHandler.response[self.respos:65536]) + self.respos += written + elif self.busy: + self.busy = False + self.ready = True + self.close() + + +class HTTPSRequestHandler(HTTPRequestHandler, TLSHandshake): + """Handling HTTPS request""" + def __init__(self, sock): + if not hasattr(self, '_map'): + asyncore.dispatcher.__init__(self, sock) # pylint: disable=non-parent-init-called + # self.tlsDone = False + TLSHandshake.__init__( + self, + sock=sock, + certfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/cert.pem', + keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem', + server_side=True) + HTTPRequestHandler.__init__(self, sock) + + def handle_connect(self): + TLSHandshake.handle_connect(self) + + def handle_close(self): + if self.tlsDone: + HTTPRequestHandler.close(self) + else: + TLSHandshake.close(self) + + def readable(self): + if self.tlsDone: + return HTTPRequestHandler.readable(self) + return TLSHandshake.readable(self) + + def handle_read(self): + if self.tlsDone: + 
HTTPRequestHandler.handle_read(self) + else: + TLSHandshake.handle_read(self) + + def writable(self): + if self.tlsDone: + return HTTPRequestHandler.writable(self) + return TLSHandshake.writable(self) + + def handle_write(self): + if self.tlsDone: + HTTPRequestHandler.handle_write(self) + else: + TLSHandshake.handle_write(self) + + +class HTTPServer(asyncore.dispatcher): + """Handling HTTP Server""" + port = 12345 + + def __init__(self): + if not hasattr(self, '_map'): + asyncore.dispatcher.__init__(self) + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.set_reuse_addr() + self.bind(('127.0.0.1', HTTPServer.port)) + self.connections = 0 + self.listen(5) + + def handle_accept(self): + pair = self.accept() + if pair is not None: + sock, addr = pair + # print 'Incoming connection from %s' % repr(addr) + self.connections += 1 + # if self.connections % 1000 == 0: + # print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map)) + HTTPRequestHandler(sock) + + +class HTTPSServer(HTTPServer): + """Handling HTTPS Server""" + port = 12345 + + def __init__(self): + if not hasattr(self, '_map'): + HTTPServer.__init__(self) + + def handle_accept(self): + pair = self.accept() + if pair is not None: + sock, addr = pair + # print 'Incoming connection from %s' % repr(addr) + self.connections += 1 + # if self.connections % 1000 == 0: + # print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map)) + HTTPSRequestHandler(sock) + + +if __name__ == "__main__": + client = HTTPSServer() + asyncore.loop() diff --git a/src/tests/mock/pybitmessage/network/https.py b/src/tests/mock/pybitmessage/network/https.py new file mode 100644 index 00000000..a7b8b57c --- /dev/null +++ b/src/tests/mock/pybitmessage/network/https.py @@ -0,0 +1,71 @@ +import asyncore + +from http import HTTPClient +from tls import TLSHandshake + +""" +self.sslSock = ssl.wrap_socket( + self.sock, + keyfile=os.path.join(paths.codePath(), 'sslkeys', 
'key.pem'), + certfile=os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), + server_side=not self.initiatedConnection, + ssl_version=ssl.PROTOCOL_TLSv1, + do_handshake_on_connect=False, + ciphers='AECDH-AES256-SHA') +""" + + +class HTTPSClient(HTTPClient, TLSHandshake): + def __init__(self, host, path): + if not hasattr(self, '_map'): + asyncore.dispatcher.__init__(self) + self.tlsDone = False + """ + TLSHandshake.__init__( + self, + address=(host, 443), + certfile='/home/shurdeek/src/PyBitmessage/sslsrc/keys/cert.pem', + keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem', + server_side=False, + ciphers='AECDH-AES256-SHA') + """ + HTTPClient.__init__(self, host, path, connect=False) + TLSHandshake.__init__(self, address=(host, 443), server_side=False) + + def handle_connect(self): + TLSHandshake.handle_connect(self) + + def handle_close(self): + if self.tlsDone: + HTTPClient.close(self) + else: + TLSHandshake.close(self) + + def readable(self): + if self.tlsDone: + return HTTPClient.readable(self) + else: + return TLSHandshake.readable(self) + + def handle_read(self): + if self.tlsDone: + HTTPClient.handle_read(self) + else: + TLSHandshake.handle_read(self) + + def writable(self): + if self.tlsDone: + return HTTPClient.writable(self) + else: + return TLSHandshake.writable(self) + + def handle_write(self): + if self.tlsDone: + HTTPClient.handle_write(self) + else: + TLSHandshake.handle_write(self) + + +if __name__ == "__main__": + client = HTTPSClient('anarchy.economicsofbitcoin.com', '/') + asyncore.loop() diff --git a/src/tests/mock/pybitmessage/network/invthread.py b/src/tests/mock/pybitmessage/network/invthread.py new file mode 100644 index 00000000..e68b7692 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/invthread.py @@ -0,0 +1,111 @@ +""" +Thread to send inv annoucements +""" +import Queue +import random +from time import time + +import addresses +import protocol +import state +from network.connectionpool import BMConnectionPool +from 
network.dandelion import Dandelion
from queues import invQueue
from threads import StoppableThread


def handleExpiredDandelion(expired):
    """For expired dandelion objects, mark all remotes as not having
    the object"""
    if not expired:
        return
    for i in BMConnectionPool().connections():
        if not i.fullyEstablished:
            continue
        for x in expired:
            streamNumber, hashid, _ = x
            try:
                del i.objectsNewToMe[hashid]
            except KeyError:
                # the peer never offered it; if it participates in this
                # stream, advertise the now-fluffed object to it instead
                if streamNumber in i.streams:
                    with i.objectsNewToThemLock:
                        i.objectsNewToThem[hashid] = time()


class InvThread(StoppableThread):
    """Main thread that sends inv announcements"""

    name = "InvBroadcaster"

    @staticmethod
    def handleLocallyGenerated(stream, hashId):
        """Locally generated inventory items require special handling"""
        Dandelion().addHash(hashId, stream=stream)
        for connection in BMConnectionPool().connections():
            # in dandelion mode, only the chosen stem child learns about it
            if state.dandelion and connection != \
                    Dandelion().objectChildStem(hashId):
                continue
            connection.objectsNewToThem[hashId] = time()

    def run(self):  # pylint: disable=too-many-branches
        while not state.shutdown:  # pylint: disable=too-many-nested-blocks
            chunk = []
            while True:
                # Dandelion fluff trigger by expiration
                handleExpiredDandelion(Dandelion().expire())
                try:
                    # non-blocking get; drain the queue into chunk
                    data = invQueue.get(False)
                    chunk.append((data[0], data[1]))
                    # locally generated
                    if len(data) == 2 or data[2] is None:
                        self.handleLocallyGenerated(data[0], data[1])
                except Queue.Empty:
                    break

            if chunk:
                for connection in BMConnectionPool().connections():
                    fluffs = []
                    stems = []
                    for inv in chunk:
                        # inv is a (stream, hashId) pair
                        if inv[0] not in connection.streams:
                            continue
                        try:
                            with connection.objectsNewToThemLock:
                                del connection.objectsNewToThem[inv[1]]
                        except KeyError:
                            # this peer already knows about the object
                            continue
                        try:
                            if connection == Dandelion().objectChildStem(inv[1]):
                                # Fluff trigger by RNG
                                # auto-ignore if config set to 0, i.e. 
dandelion is off + if random.randint(1, 100) >= state.dandelion: + fluffs.append(inv[1]) + # send a dinv only if the stem node supports dandelion + elif connection.services & protocol.NODE_DANDELION > 0: + stems.append(inv[1]) + else: + fluffs.append(inv[1]) + except KeyError: + fluffs.append(inv[1]) + + if fluffs: + random.shuffle(fluffs) + connection.append_write_buf(protocol.CreatePacket( + 'inv', + addresses.encodeVarint( + len(fluffs)) + ''.join(fluffs))) + if stems: + random.shuffle(stems) + connection.append_write_buf(protocol.CreatePacket( + 'dinv', + addresses.encodeVarint( + len(stems)) + ''.join(stems))) + + invQueue.iterate() + for _ in range(len(chunk)): + invQueue.task_done() + + if Dandelion().refresh < time(): + Dandelion().reRandomiseStems() + + self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/network/knownnodes.py b/src/tests/mock/pybitmessage/network/knownnodes.py new file mode 100644 index 00000000..77a01fcc --- /dev/null +++ b/src/tests/mock/pybitmessage/network/knownnodes.py @@ -0,0 +1,269 @@ +""" +Manipulations with knownNodes dictionary. +""" +# TODO: knownnodes object maybe? 
+# pylint: disable=global-statement + +import json +import logging +import os +import pickle +import threading +import time +try: + from collections.abc import Iterable +except ImportError: + from collections import Iterable + +from pybitmessage import state +from pybitmessage.bmconfigparser import BMConfigParser +from pybitmessage.network.node import Peer + +state.Peer = Peer + +knownNodesLock = threading.RLock() +"""Thread lock for knownnodes modification""" +knownNodes = {stream: {} for stream in range(1, 4)} +"""The dict of known nodes for each stream""" + +knownNodesTrimAmount = 2000 +"""trim stream knownnodes dict to this length""" + +knownNodesForgetRating = -0.5 +"""forget a node after rating is this low""" + +knownNodesActual = False + +logger = logging.getLogger('default') + +DEFAULT_NODES = ( + Peer('5.45.99.75', 8444), + Peer('75.167.159.54', 8444), + Peer('95.165.168.168', 8444), + Peer('85.180.139.241', 8444), + Peer('158.222.217.190', 8080), + Peer('178.62.12.187', 8448), + Peer('24.188.198.204', 8111), + Peer('109.147.204.113', 1195), + Peer('178.11.46.221', 8444) +) + + +def json_serialize_knownnodes(output): + """ + Reorganize knownnodes dict and write it as JSON to output + """ + _serialized = [] + for stream, peers in knownNodes.iteritems(): + for peer, info in peers.iteritems(): + info.update(rating=round(info.get('rating', 0), 2)) + _serialized.append({ + 'stream': stream, 'peer': peer._asdict(), 'info': info + }) + json.dump(_serialized, output, indent=4) + + +def json_deserialize_knownnodes(source): + """ + Read JSON from source and make knownnodes dict + """ + global knownNodesActual + for node in json.load(source): + peer = node['peer'] + info = node['info'] + peer = Peer(str(peer['host']), peer.get('port', 8444)) + knownNodes[node['stream']][peer] = info + if not (knownNodesActual + or info.get('self')) and peer not in DEFAULT_NODES: + knownNodesActual = True + + +def pickle_deserialize_old_knownnodes(source): + """ + Unpickle source and 
reorganize knownnodes dict if it has old format + the old format was {Peer:lastseen, ...} + the new format is {Peer:{"lastseen":i, "rating":f}} + """ + global knownNodes + knownNodes = pickle.load(source) + for stream in knownNodes.keys(): + for node, params in knownNodes[stream].iteritems(): + if isinstance(params, (float, int)): + addKnownNode(stream, node, params) + + +def saveKnownNodes(dirName=None): + """Save knownnodes to filesystem""" + if dirName is None: + dirName = state.appdata + with knownNodesLock: + with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output: + json_serialize_knownnodes(output) + + +def addKnownNode(stream, peer, lastseen=None, is_self=False): + """ + Add a new node to the dict or update lastseen if it already exists. + Do it for each stream number if *stream* is `Iterable`. + Returns True if added a new node. + """ + # pylint: disable=too-many-branches + if isinstance(stream, Iterable): + with knownNodesLock: + for s in stream: + addKnownNode(s, peer, lastseen, is_self) + return + + rating = 0.0 + if not lastseen: + # FIXME: maybe about 28 days? 
+ lastseen = int(time.time()) + else: + lastseen = int(lastseen) + try: + info = knownNodes[stream].get(peer) + if lastseen > info['lastseen']: + info['lastseen'] = lastseen + except (KeyError, TypeError): + pass + else: + return + + if not is_self: + if len(knownNodes[stream]) > BMConfigParser().safeGetInt( + "knownnodes", "maxnodes"): + return + + knownNodes[stream][peer] = { + 'lastseen': lastseen, + 'rating': rating or 1 if is_self else 0, + 'self': is_self, + } + return True + + +def createDefaultKnownNodes(): + """Creating default Knownnodes""" + past = time.time() - 2418600 # 28 days - 10 min + for peer in DEFAULT_NODES: + addKnownNode(1, peer, past) + saveKnownNodes() + + +def readKnownNodes(): + """Load knownnodes from filesystem""" + try: + with open(state.appdata + 'knownnodes.dat', 'rb') as source: + with knownNodesLock: + try: + json_deserialize_knownnodes(source) + except ValueError: + source.seek(0) + pickle_deserialize_old_knownnodes(source) + except (IOError, OSError, KeyError, EOFError): + logger.debug( + 'Failed to read nodes from knownnodes.dat', exc_info=True) + createDefaultKnownNodes() + + config = BMConfigParser() + + # your own onion address, if setup + onionhostname = config.safeGet('bitmessagesettings', 'onionhostname') + if onionhostname and ".onion" in onionhostname: + onionport = config.safeGetInt('bitmessagesettings', 'onionport') + if onionport: + self_peer = Peer(onionhostname, onionport) + addKnownNode(1, self_peer, is_self=True) + state.ownAddresses[self_peer] = True + + +def increaseRating(peer): + """Increase rating of a peer node""" + increaseAmount = 0.1 + maxRating = 1 + with knownNodesLock: + for stream in knownNodes.keys(): + try: + knownNodes[stream][peer]["rating"] = min( + knownNodes[stream][peer]["rating"] + increaseAmount, + maxRating + ) + except KeyError: + pass + + +def decreaseRating(peer): + """Decrease rating of a peer node""" + decreaseAmount = 0.1 + minRating = -1 + with knownNodesLock: + for stream in 
knownNodes.keys():
+            try:
+                knownNodes[stream][peer]["rating"] = max(
+                    knownNodes[stream][peer]["rating"] - decreaseAmount,
+                    minRating
+                )
+            except KeyError:
+                pass
+
+
+def trimKnownNodes(recAddrStream=1):
+    """Trim the *recAddrStream* knownnodes dict down to size.
+
+    Once the stream holds more than the configured "knownnodes/maxnodes"
+    entries, drop the `knownNodesTrimAmount` oldest peers (by "lastseen").
+    """
+    if len(knownNodes[recAddrStream]) < \
+            BMConfigParser().safeGetInt("knownnodes", "maxnodes"):
+        return
+    with knownNodesLock:
+        # Iterating the dict yields Peer keys, so "lastseen" must be
+        # looked up in the info dict; indexing the Peer namedtuple with
+        # a string key would raise TypeError.
+        oldestList = sorted(
+            knownNodes[recAddrStream],
+            key=lambda x: knownNodes[recAddrStream][x]['lastseen']
+        )[:knownNodesTrimAmount]
+        for oldest in oldestList:
+            del knownNodes[recAddrStream][oldest]
+
+
+def dns():
+    """Add DNS names to knownnodes"""
+    for port in [8080, 8444]:
+        addKnownNode(
+            1, Peer('bootstrap%s.bitmessage.org' % port, port))
+
+
+def cleanupKnownNodes():
+    """
+    Cleanup knownnodes: remove old nodes and nodes with low rating
+    """
+    global knownNodesActual
+    now = int(time.time())
+    needToWriteKnownNodesToDisk = False
+
+    with knownNodesLock:
+        for stream in knownNodes:
+            if stream not in state.streamsInWhichIAmParticipating:
+                continue
+            # snapshot the keys: deleting from the dict while iterating
+            # a live keys view raises RuntimeError on Python 3
+            keys = list(knownNodes[stream].keys())
+            for node in keys:
+                if len(knownNodes[stream]) <= 1:  # leave at least one node
+                    if stream == 1:
+                        knownNodesActual = False
+                    break
+                try:
+                    age = now - knownNodes[stream][node]["lastseen"]
+                    # scrap old nodes (age > 28 days)
+                    if age > 2419200:
+                        needToWriteKnownNodesToDisk = True
+                        del knownNodes[stream][node]
+                        continue
+                    # scrap old nodes (age > 3 hours) with low rating
+                    if (age > 10800 and knownNodes[stream][node]["rating"]
+                            <= knownNodesForgetRating):
+                        needToWriteKnownNodesToDisk = True
+                        del knownNodes[stream][node]
+                        continue
+                except TypeError:
+                    logger.warning('Error in %s', node)
+            keys = []
+
+    # Let us write out the knownNodes to disk
+    # if there is anything new to write out.
+    if needToWriteKnownNodesToDisk:
+        saveKnownNodes()
diff --git a/src/tests/mock/pybitmessage/network/networkthread.py b/src/tests/mock/pybitmessage/network/networkthread.py
new file mode 100644
index 00000000..61ff6c09
--- /dev/null
+++ b/src/tests/mock/pybitmessage/network/networkthread.py
@@ -0,0 +1,42 @@
+"""
+A thread to handle network concerns
+"""
+import network.asyncore_pollchoose as asyncore
+import state
+from network.connectionpool import BMConnectionPool
+from queues import excQueue
+from threads import StoppableThread
+
+
+class BMNetworkThread(StoppableThread):
+    """Main network thread"""
+    name = "Asyncore"
+
+    def run(self):
+        try:
+            while not self._stopped and state.shutdown == 0:
+                BMConnectionPool().loop()
+        except Exception as e:
+            excQueue.put((self.name, e))
+            raise
+
+    def stopThread(self):
+        super(BMNetworkThread, self).stopThread()
+        for i in BMConnectionPool().listeningSockets.values():
+            try:
+                i.close()
+            except Exception:
+                pass
+        for i in BMConnectionPool().outboundConnections.values():
+            try:
+                i.close()
+            except Exception:
+                pass
+        for i in BMConnectionPool().inboundConnections.values():
+            try:
+                i.close()
+            except Exception:
+                pass
+
+        # just in case
+        asyncore.close_all()
diff --git a/src/tests/mock/pybitmessage/network/node.py b/src/tests/mock/pybitmessage/network/node.py
new file mode 100644
index 00000000..4c532b81
--- /dev/null
+++ b/src/tests/mock/pybitmessage/network/node.py
@@ -0,0 +1,7 @@
+"""
+Named tuples representing the network peers
+"""
+import collections
+
+Peer = collections.namedtuple('Peer', ['host', 'port'])
+Node = collections.namedtuple('Node', ['services', 'host', 'port'])
diff --git a/src/tests/mock/pybitmessage/network/objectracker.py b/src/tests/mock/pybitmessage/network/objectracker.py
new file mode 100644
index 00000000..65e06de4
--- /dev/null
+++ b/src/tests/mock/pybitmessage/network/objectracker.py
@@ -0,0 +1,136 @@
+"""
+Module for tracking objects
+"""
+import time
+from threading import RLock
+
+import network.connectionpool
+from network.dandelion import Dandelion +from randomtrackingdict import RandomTrackingDict + +haveBloom = False + +try: + # pybloomfiltermmap + from pybloomfilter import BloomFilter + haveBloom = True +except ImportError: + try: + # pybloom + from pybloom import BloomFilter + haveBloom = True + except ImportError: + pass + +# it isn't actually implemented yet so no point in turning it on +haveBloom = False + +# tracking pending downloads globally, for stats +missingObjects = {} + + +class ObjectTracker(object): + """Object tracker mixin""" + invCleanPeriod = 300 + invInitialCapacity = 50000 + invErrorRate = 0.03 + trackingExpires = 3600 + initialTimeOffset = 60 + + def __init__(self): + self.objectsNewToMe = RandomTrackingDict() + self.objectsNewToThem = {} + self.objectsNewToThemLock = RLock() + self.initInvBloom() + self.initAddrBloom() + self.lastCleaned = time.time() + + def initInvBloom(self): + """Init bloom filter for tracking. WIP.""" + if haveBloom: + # lock? + self.invBloom = BloomFilter( + capacity=ObjectTracker.invInitialCapacity, + error_rate=ObjectTracker.invErrorRate) + + def initAddrBloom(self): + """Init bloom filter for tracking addrs, WIP. + This either needs to be moved to addrthread.py or removed.""" + if haveBloom: + # lock? 
+            self.addrBloom = BloomFilter(
+                capacity=ObjectTracker.invInitialCapacity,
+                error_rate=ObjectTracker.invErrorRate)
+
+    def clean(self):
+        """Clean up tracking to prevent memory bloat"""
+        if self.lastCleaned < time.time() - ObjectTracker.invCleanPeriod:
+            if haveBloom:
+                # missingObjects is a dict: the old "== 0" comparison was
+                # always False, so the blooms were never re-initialised
+                if not missingObjects:
+                    self.initInvBloom()
+                    self.initAddrBloom()
+            else:
+                # release memory; .items() works on Python 2 and 3
+                deadline = time.time() - ObjectTracker.trackingExpires
+                with self.objectsNewToThemLock:
+                    self.objectsNewToThem = {
+                        k: v
+                        for k, v in self.objectsNewToThem.items()
+                        if v >= deadline}
+            self.lastCleaned = time.time()
+
+    def hasObj(self, hashid):
+        """Do we already have object?"""
+        if haveBloom:
+            return hashid in self.invBloom
+        return hashid in self.objectsNewToMe
+
+    def handleReceivedInventory(self, hashId):
+        """Handling received inventory"""
+        if haveBloom:
+            self.invBloom.add(hashId)
+        try:
+            with self.objectsNewToThemLock:
+                del self.objectsNewToThem[hashId]
+        except KeyError:
+            pass
+        if hashId not in missingObjects:
+            missingObjects[hashId] = time.time()
+        self.objectsNewToMe[hashId] = True
+
+    def handleReceivedObject(self, streamNumber, hashid):
+        """Handling received object"""
+        for i in network.connectionpool.BMConnectionPool().connections():
+            if not i.fullyEstablished:
+                continue
+            try:
+                del i.objectsNewToMe[hashid]
+            except KeyError:
+                if streamNumber in i.streams and (
+                        not Dandelion().hasHash(hashid)
+                        or Dandelion().objectChildStem(hashid) == i):
+                    with i.objectsNewToThemLock:
+                        i.objectsNewToThem[hashid] = time.time()
+                    # update stream number,
+                    # which we didn't have when we just received the dinv
+                    # also resets expiration of the stem mode
+                    Dandelion().setHashStream(hashid, streamNumber)
+
+            if i == self:
+                try:
+                    with i.objectsNewToThemLock:
+                        del i.objectsNewToThem[hashid]
+                except KeyError:
+                    pass
+        self.objectsNewToMe.setLastObject()
+
+    def hasAddr(self, addr):
+        """WIP, should be moved to addrthread.py or removed"""
+        if haveBloom:
+            # NOTE(review): this checks invBloom below; addrBloom looks
+            # intended — confirm before relying on it
+            return addr in 
self.invBloom + return None + + def addAddr(self, hashid): + """WIP, should be moved to addrthread.py or removed""" + if haveBloom: + self.addrBloom.add(hashid) diff --git a/src/tests/mock/pybitmessage/network/proxy.py b/src/tests/mock/pybitmessage/network/proxy.py new file mode 100644 index 00000000..3bd3cc66 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/proxy.py @@ -0,0 +1,148 @@ +""" +Set proxy if avaiable otherwise exception +""" +# pylint: disable=protected-access +import logging +import socket +import time + +import asyncore_pollchoose as asyncore +from advanceddispatcher import AdvancedDispatcher +from bmconfigparser import BMConfigParser +from node import Peer + +logger = logging.getLogger('default') + + +class ProxyError(Exception): + """Base proxy exception class""" + errorCodes = ("Unknown error",) + + def __init__(self, code=-1): + self.code = code + try: + self.message = self.errorCodes[code] + except IndexError: + self.message = self.errorCodes[-1] + super(ProxyError, self).__init__(self.message) + + +class GeneralProxyError(ProxyError): + """General proxy error class (not specfic to an implementation)""" + errorCodes = ( + "Success", + "Invalid data", + "Not connected", + "Not available", + "Bad proxy type", + "Bad input", + "Timed out", + "Network unreachable", + "Connection refused", + "Host unreachable" + ) + + +class Proxy(AdvancedDispatcher): + """Base proxy class""" + # these are global, and if you change config during runtime, + # all active/new instances should change too + _proxy = ("127.0.0.1", 9050) + _auth = None + _onion_proxy = None + _onion_auth = None + _remote_dns = True + + @property + def proxy(self): + """Return proxy IP and port""" + return self.__class__._proxy + + @proxy.setter + def proxy(self, address): + """Set proxy IP and port""" + if (not isinstance(address, tuple) or len(address) < 2 + or not isinstance(address[0], str) + or not isinstance(address[1], int)): + raise ValueError + self.__class__._proxy = address 
+ + @property + def auth(self): + """Return proxy authentication settings""" + return self.__class__._auth + + @auth.setter + def auth(self, authTuple): + """Set proxy authentication (username and password)""" + self.__class__._auth = authTuple + + @property + def onion_proxy(self): + """ + Return separate proxy IP and port for use only with onion + addresses. Untested. + """ + return self.__class__._onion_proxy + + @onion_proxy.setter + def onion_proxy(self, address): + """Set onion proxy address""" + if address is not None and ( + not isinstance(address, tuple) or len(address) < 2 + or not isinstance(address[0], str) + or not isinstance(address[1], int) + ): + raise ValueError + self.__class__._onion_proxy = address + + @property + def onion_auth(self): + """Return proxy authentication settings for onion hosts only""" + return self.__class__._onion_auth + + @onion_auth.setter + def onion_auth(self, authTuple): + """Set proxy authentication for onion hosts only. Untested.""" + self.__class__._onion_auth = authTuple + + def __init__(self, address): + if not isinstance(address, Peer): + raise ValueError + AdvancedDispatcher.__init__(self) + self.destination = address + self.isOutbound = True + self.fullyEstablished = False + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + if BMConfigParser().safeGetBoolean( + "bitmessagesettings", "socksauthentication"): + self.auth = ( + BMConfigParser().safeGet( + "bitmessagesettings", "socksusername"), + BMConfigParser().safeGet( + "bitmessagesettings", "sockspassword")) + else: + self.auth = None + self.connect( + self.onion_proxy + if address.host.endswith(".onion") and self.onion_proxy else + self.proxy + ) + + def handle_connect(self): + """Handle connection event (to the proxy)""" + self.set_state("init") + try: + AdvancedDispatcher.handle_connect(self) + except socket.error as e: + if e.errno in asyncore._DISCONNECTED: + logger.debug( + "%s:%i: Connection failed: %s", + self.destination.host, self.destination.port, 
e) + return + self.state_init() + + def state_proxy_handshake_done(self): + """Handshake is complete at this point""" + self.connectedAt = time.time() + return False diff --git a/src/tests/mock/pybitmessage/network/receivequeuethread.py b/src/tests/mock/pybitmessage/network/receivequeuethread.py new file mode 100644 index 00000000..56c01b77 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/receivequeuethread.py @@ -0,0 +1,56 @@ +""" +Process data incoming from network +""" +import errno +import Queue +import socket + +import state +from network.advanceddispatcher import UnknownStateError +from network.connectionpool import BMConnectionPool +from queues import receiveDataQueue +from threads import StoppableThread + + +class ReceiveQueueThread(StoppableThread): + """This thread processes data received from the network + (which is done by the asyncore thread)""" + def __init__(self, num=0): + super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num) + + def run(self): + while not self._stopped and state.shutdown == 0: + try: + dest = receiveDataQueue.get(block=True, timeout=1) + except Queue.Empty: + continue + + if self._stopped or state.shutdown: + break + + # cycle as long as there is data + # methods should return False if there isn't enough data, + # or the connection is to be aborted + + # state_* methods should return False if there isn't + # enough data, or the connection is to be aborted + + try: + connection = BMConnectionPool().getConnectionByAddr(dest) + # connection object not found + except KeyError: + receiveDataQueue.task_done() + continue + try: + connection.process() + # state isn't implemented + except UnknownStateError: + pass + except socket.error as err: + if err.errno == errno.EBADF: + connection.set_state("close", 0) + else: + self.logger.error('Socket error: %s', err) + except: # noqa:E722 + self.logger.error('Error processing', exc_info=True) + receiveDataQueue.task_done() diff --git 
a/src/tests/mock/pybitmessage/network/socks4a.py b/src/tests/mock/pybitmessage/network/socks4a.py new file mode 100644 index 00000000..e9786168 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/socks4a.py @@ -0,0 +1,147 @@ +""" +SOCKS4a proxy module +""" +# pylint: disable=attribute-defined-outside-init +import logging +import socket +import struct + +from proxy import GeneralProxyError, Proxy, ProxyError + +logger = logging.getLogger('default') + + +class Socks4aError(ProxyError): + """SOCKS4a error base class""" + errorCodes = ( + "Request granted", + "Request rejected or failed", + "Request rejected because SOCKS server cannot connect to identd" + " on the client", + "Request rejected because the client program and identd report" + " different user-ids", + "Unknown error" + ) + + +class Socks4a(Proxy): + """SOCKS4a proxy class""" + def __init__(self, address=None): + Proxy.__init__(self, address) + self.ipaddr = None + self.destport = address[1] + + def state_init(self): + """Protocol initialisation (before connection is established)""" + self.set_state("auth_done", 0) + return True + + def state_pre_connect(self): + """Handle feedback from SOCKS4a while it is connecting on our behalf""" + # Get the response + if self.read_buf[0:1] != chr(0x00).encode(): + # bad data + self.close() + raise GeneralProxyError(1) + elif self.read_buf[1:2] != chr(0x5A).encode(): + # Connection failed + self.close() + if ord(self.read_buf[1:2]) in (91, 92, 93): + # socks 4 error + raise Socks4aError(ord(self.read_buf[1:2]) - 90) + else: + raise Socks4aError(4) + # Get the bound address/port + self.boundport = struct.unpack(">H", self.read_buf[2:4])[0] + self.boundaddr = self.read_buf[4:] + self.__proxysockname = (self.boundaddr, self.boundport) + if self.ipaddr: + self.__proxypeername = ( + socket.inet_ntoa(self.ipaddr), self.destination[1]) + else: + self.__proxypeername = (self.destination[0], self.destport) + self.set_state("proxy_handshake_done", length=8) + return True + + 
def proxy_sock_name(self): + """ + Handle return value when using SOCKS4a for DNS resolving + instead of connecting. + """ + return socket.inet_ntoa(self.__proxysockname[0]) + + +class Socks4aConnection(Socks4a): + """Child SOCKS4a class used for making outbound connections.""" + def __init__(self, address): + Socks4a.__init__(self, address=address) + + def state_auth_done(self): + """Request connection to be made""" + # Now we can request the actual connection + rmtrslv = False + self.append_write_buf( + struct.pack('>BBH', 0x04, 0x01, self.destination[1])) + # If the given destination address is an IP address, we'll + # use the IPv4 address request even if remote resolving was specified. + try: + self.ipaddr = socket.inet_aton(self.destination[0]) + self.append_write_buf(self.ipaddr) + except socket.error: + # Well it's not an IP number, so it's probably a DNS name. + if self._remote_dns: + # Resolve remotely + rmtrslv = True + self.ipaddr = None + self.append_write_buf( + struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)) + else: + # Resolve locally + self.ipaddr = socket.inet_aton( + socket.gethostbyname(self.destination[0])) + self.append_write_buf(self.ipaddr) + if self._auth: + self.append_write_buf(self._auth[0]) + self.append_write_buf(chr(0x00).encode()) + if rmtrslv: + self.append_write_buf(self.destination[0] + chr(0x00).encode()) + self.set_state("pre_connect", length=0, expectBytes=8) + return True + + def state_pre_connect(self): + """Tell SOCKS4a to initiate a connection""" + try: + return Socks4a.state_pre_connect(self) + except Socks4aError as e: + self.close_reason = e.message + self.set_state("close") + + +class Socks4aResolver(Socks4a): + """DNS resolver class using SOCKS4a""" + def __init__(self, host): + self.host = host + self.port = 8444 + Socks4a.__init__(self, address=(self.host, self.port)) + + def state_auth_done(self): + """Request connection to be made""" + # Now we can request the actual connection + self.append_write_buf( + 
struct.pack('>BBH', 0x04, 0xF0, self.destination[1])) + self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)) + if self._auth: + self.append_write_buf(self._auth[0]) + self.append_write_buf(chr(0x00).encode()) + self.append_write_buf(self.host + chr(0x00).encode()) + self.set_state("pre_connect", length=0, expectBytes=8) + return True + + def resolved(self): + """ + Resolving is done, process the return value. To use this within + PyBitmessage, a callback needs to be implemented which hasn't + been done yet. + """ + logger.debug( + 'Resolved %s as %s', self.host, self.proxy_sock_name()) diff --git a/src/tests/mock/pybitmessage/network/socks5.py b/src/tests/mock/pybitmessage/network/socks5.py new file mode 100644 index 00000000..d1daae42 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/socks5.py @@ -0,0 +1,224 @@ +""" +SOCKS5 proxy module +""" +# pylint: disable=attribute-defined-outside-init + +import logging +import socket +import struct + +from node import Peer +from proxy import GeneralProxyError, Proxy, ProxyError + +logger = logging.getLogger('default') + + +class Socks5AuthError(ProxyError): + """Rised when the socks5 protocol encounters an authentication error""" + errorCodes = ( + "Succeeded", + "Authentication is required", + "All offered authentication methods were rejected", + "Unknown username or invalid password", + "Unknown error" + ) + + +class Socks5Error(ProxyError): + """Rised when socks5 protocol encounters an error""" + errorCodes = ( + "Succeeded", + "General SOCKS server failure", + "Connection not allowed by ruleset", + "Network unreachable", + "Host unreachable", + "Connection refused", + "TTL expired", + "Command not supported", + "Address type not supported", + "Unknown error" + ) + + +class Socks5(Proxy): + """A socks5 proxy base class""" + def __init__(self, address=None): + Proxy.__init__(self, address) + self.ipaddr = None + self.destport = address[1] + + def state_init(self): + """Protocol initialization (before 
connection is established)""" + if self._auth: + self.append_write_buf(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02)) + else: + self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00)) + self.set_state("auth_1", length=0, expectBytes=2) + return True + + def state_auth_1(self): + """Perform authentication if peer is requesting it.""" + ret = struct.unpack('BB', self.read_buf[:2]) + if ret[0] != 5: + # general error + raise GeneralProxyError(1) + elif ret[1] == 0: + # no auth required + self.set_state("auth_done", length=2) + elif ret[1] == 2: + # username/password + self.append_write_buf( + struct.pack( + 'BB', 1, len(self._auth[0])) + self._auth[0] + struct.pack( + 'B', len(self._auth[1])) + self._auth[1]) + self.set_state("auth_needed", length=2, expectBytes=2) + else: + if ret[1] == 0xff: + # auth error + raise Socks5AuthError(2) + else: + # other error + raise GeneralProxyError(1) + return True + + def state_auth_needed(self): + """Handle response to authentication attempt""" + ret = struct.unpack('BB', self.read_buf[0:2]) + if ret[0] != 1: + # general error + raise GeneralProxyError(1) + if ret[1] != 0: + # auth error + raise Socks5AuthError(3) + # all ok + self.set_state("auth_done", length=2) + return True + + def state_pre_connect(self): + """Handle feedback from socks5 while it is connecting on our behalf.""" + # Get the response + if self.read_buf[0:1] != chr(0x05).encode(): + self.close() + raise GeneralProxyError(1) + elif self.read_buf[1:2] != chr(0x00).encode(): + # Connection failed + self.close() + if ord(self.read_buf[1:2]) <= 8: + raise Socks5Error(ord(self.read_buf[1:2])) + else: + raise Socks5Error(9) + # Get the bound address/port + elif self.read_buf[3:4] == chr(0x01).encode(): + self.set_state("proxy_addr_1", length=4, expectBytes=4) + elif self.read_buf[3:4] == chr(0x03).encode(): + self.set_state("proxy_addr_2_1", length=4, expectBytes=1) + else: + self.close() + raise GeneralProxyError(1) + return True + + def state_proxy_addr_1(self): + 
"""Handle IPv4 address returned for peer""" + self.boundaddr = self.read_buf[0:4] + self.set_state("proxy_port", length=4, expectBytes=2) + return True + + def state_proxy_addr_2_1(self): + """ + Handle other addresses than IPv4 returned for peer + (e.g. IPv6, onion, ...). This is part 1 which retrieves the + length of the data. + """ + self.address_length = ord(self.read_buf[0:1]) + self.set_state( + "proxy_addr_2_2", length=1, expectBytes=self.address_length) + return True + + def state_proxy_addr_2_2(self): + """ + Handle other addresses than IPv4 returned for peer + (e.g. IPv6, onion, ...). This is part 2 which retrieves the data. + """ + self.boundaddr = self.read_buf[0:self.address_length] + self.set_state("proxy_port", length=self.address_length, expectBytes=2) + return True + + def state_proxy_port(self): + """Handle peer's port being returned.""" + self.boundport = struct.unpack(">H", self.read_buf[0:2])[0] + self.__proxysockname = (self.boundaddr, self.boundport) + if self.ipaddr is not None: + self.__proxypeername = ( + socket.inet_ntoa(self.ipaddr), self.destination[1]) + else: + self.__proxypeername = (self.destination[0], self.destport) + self.set_state("proxy_handshake_done", length=2) + return True + + def proxy_sock_name(self): + """Handle return value when using SOCKS5 + for DNS resolving instead of connecting.""" + return socket.inet_ntoa(self.__proxysockname[0]) + + +class Socks5Connection(Socks5): + """Child socks5 class used for making outbound connections.""" + def state_auth_done(self): + """Request connection to be made""" + # Now we can request the actual connection + self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00)) + # If the given destination address is an IP address, we'll + # use the IPv4 address request even if remote resolving was specified. + try: + self.ipaddr = socket.inet_aton(self.destination[0]) + self.append_write_buf(chr(0x01).encode() + self.ipaddr) + except socket.error: # may be IPv6! 
+ # Well it's not an IP number, so it's probably a DNS name. + if self._remote_dns: + # Resolve remotely + self.ipaddr = None + self.append_write_buf(chr(0x03).encode() + chr( + len(self.destination[0])).encode() + self.destination[0]) + else: + # Resolve locally + self.ipaddr = socket.inet_aton( + socket.gethostbyname(self.destination[0])) + self.append_write_buf(chr(0x01).encode() + self.ipaddr) + self.append_write_buf(struct.pack(">H", self.destination[1])) + self.set_state("pre_connect", length=0, expectBytes=4) + return True + + def state_pre_connect(self): + """Tell socks5 to initiate a connection""" + try: + return Socks5.state_pre_connect(self) + except Socks5Error as e: + self.close_reason = e.message + self.set_state("close") + + +class Socks5Resolver(Socks5): + """DNS resolver class using socks5""" + def __init__(self, host): + self.host = host + self.port = 8444 + Socks5.__init__(self, address=Peer(self.host, self.port)) + + def state_auth_done(self): + """Perform resolving""" + # Now we can request the actual connection + self.append_write_buf(struct.pack('BBB', 0x05, 0xF0, 0x00)) + self.append_write_buf(chr(0x03).encode() + chr( + len(self.host)).encode() + str(self.host)) + self.append_write_buf(struct.pack(">H", self.port)) + self.set_state("pre_connect", length=0, expectBytes=4) + return True + + def resolved(self): + """ + Resolving is done, process the return value. + To use this within PyBitmessage, a callback needs to be + implemented which hasn't been done yet. 
+ """ + logger.debug( + 'Resolved %s as %s', self.host, self.proxy_sock_name()) diff --git a/src/tests/mock/pybitmessage/network/stats.py b/src/tests/mock/pybitmessage/network/stats.py new file mode 100644 index 00000000..82e6c87f --- /dev/null +++ b/src/tests/mock/pybitmessage/network/stats.py @@ -0,0 +1,78 @@ +""" +Network statistics +""" +import time + +import asyncore_pollchoose as asyncore +from network.connectionpool import BMConnectionPool +from objectracker import missingObjects + + +lastReceivedTimestamp = time.time() +lastReceivedBytes = 0 +currentReceivedSpeed = 0 +lastSentTimestamp = time.time() +lastSentBytes = 0 +currentSentSpeed = 0 + + +def connectedHostsList(): + """List of all the connected hosts""" + return BMConnectionPool().establishedConnections() + + +def sentBytes(): + """Sending Bytes""" + return asyncore.sentBytes + + +def uploadSpeed(): + """Getting upload speed""" + # pylint: disable=global-statement + global lastSentTimestamp, lastSentBytes, currentSentSpeed + currentTimestamp = time.time() + if int(lastSentTimestamp) < int(currentTimestamp): + currentSentBytes = asyncore.sentBytes + currentSentSpeed = int( + (currentSentBytes - lastSentBytes) / ( + currentTimestamp - lastSentTimestamp)) + lastSentBytes = currentSentBytes + lastSentTimestamp = currentTimestamp + return currentSentSpeed + + +def receivedBytes(): + """Receiving Bytes""" + return asyncore.receivedBytes + + +def downloadSpeed(): + """Getting download speed""" + # pylint: disable=global-statement + global lastReceivedTimestamp, lastReceivedBytes, currentReceivedSpeed + currentTimestamp = time.time() + if int(lastReceivedTimestamp) < int(currentTimestamp): + currentReceivedBytes = asyncore.receivedBytes + currentReceivedSpeed = int( + (currentReceivedBytes - lastReceivedBytes) / ( + currentTimestamp - lastReceivedTimestamp)) + lastReceivedBytes = currentReceivedBytes + lastReceivedTimestamp = currentTimestamp + return currentReceivedSpeed + + +def pendingDownload(): + 
"""Getting pending downloads""" + return len(missingObjects) + + +def pendingUpload(): + """Getting pending uploads""" + # tmp = {} + # for connection in BMConnectionPool().inboundConnections.values() + \ + # BMConnectionPool().outboundConnections.values(): + # for k in connection.objectsNewToThem.keys(): + # tmp[k] = True + # This probably isn't the correct logic so it's disabled + # return len(tmp) + return 0 diff --git a/src/tests/mock/pybitmessage/network/tcp.py b/src/tests/mock/pybitmessage/network/tcp.py new file mode 100644 index 00000000..ff778378 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/tcp.py @@ -0,0 +1,448 @@ +""" +TCP protocol handler +""" +# pylint: disable=too-many-ancestors +import l10n +import logging +import math +import random +import socket +import time + +import addresses +import asyncore_pollchoose as asyncore +import connectionpool +import helper_random +import knownnodes +import protocol +import state +from bmconfigparser import BMConfigParser +from helper_random import randomBytes +from inventory import Inventory +from network.advanceddispatcher import AdvancedDispatcher +from network.assemble import assemble_addr +from network.bmproto import BMProto +from network.constants import MAX_OBJECT_COUNT +from network.dandelion import Dandelion +from network.objectracker import ObjectTracker +from network.socks4a import Socks4aConnection +from network.socks5 import Socks5Connection +from network.tls import TLSDispatcher +from node import Peer +from queues import invQueue, receiveDataQueue, UISignalQueue +from tr import _translate + +logger = logging.getLogger('default') + + +maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #: Equals three hours +maximumTimeOffsetWrongCount = 3 #: Connections with wrong time offset + + +class TCPConnection(BMProto, TLSDispatcher): + # pylint: disable=too-many-instance-attributes + """ + .. 
todo:: Look to understand and/or fix the non-parent-init-called + """ + + def __init__(self, address=None, sock=None): + BMProto.__init__(self, address=address, sock=sock) + self.verackReceived = False + self.verackSent = False + self.streams = [0] + self.fullyEstablished = False + self.skipUntil = 0 + if address is None and sock is not None: + self.destination = Peer(*sock.getpeername()) + self.isOutbound = False + TLSDispatcher.__init__(self, sock, server_side=True) + self.connectedAt = time.time() + logger.debug( + 'Received connection from %s:%i', + self.destination.host, self.destination.port) + self.nodeid = randomBytes(8) + elif address is not None and sock is not None: + TLSDispatcher.__init__(self, sock, server_side=False) + self.isOutbound = True + logger.debug( + 'Outbound proxy connection to %s:%i', + self.destination.host, self.destination.port) + else: + self.destination = address + self.isOutbound = True + self.create_socket( + socket.AF_INET6 if ":" in address.host else socket.AF_INET, + socket.SOCK_STREAM) + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + TLSDispatcher.__init__(self, sock, server_side=False) + self.connect(self.destination) + logger.debug( + 'Connecting to %s:%i', + self.destination.host, self.destination.port) + try: + self.local = ( + protocol.checkIPAddress( + protocol.encodeHost(self.destination.host), True) + and not protocol.checkSocksIP(self.destination.host) + ) + except socket.error: + # it's probably a hostname + pass + self.network_group = protocol.network_group(self.destination.host) + ObjectTracker.__init__(self) # pylint: disable=non-parent-init-called + self.bm_proto_reset() + self.set_state("bm_header", expectBytes=protocol.Header.size) + + def antiIntersectionDelay(self, initial=False): + """ + This is a defense against the so called intersection attacks. + + It is called when you notice peer is requesting non-existing + objects, or right after the connection is established. 
    def checkTimeOffsetNotification(self):
        """
        Check if we have connected to too many nodes which have too high
        time offset from us
        """
        # timeOffsetWrongCount is read from the BMProto *class*, so it
        # accumulates offending peers across all connections; only warn
        # while this particular connection has not fully established.
        if BMProto.timeOffsetWrongCount > \
                maximumTimeOffsetWrongCount and \
                not self.fullyEstablished:
            UISignalQueue.put((
                'updateStatusBar',
                _translate(
                    "MainWindow",
                    "The time on your computer, %1, may be wrong. "
                    "Please verify your settings."
                ).arg(l10n.formatTimestamp())))  # %1: Qt .arg placeholder
+ """ + self.set_connection_fully_established() + self.set_state("bm_header") + self.bm_proto_reset() + return True + + def set_connection_fully_established(self): + """Initiate inventory synchronisation.""" + if not self.isOutbound and not self.local: + state.clientHasReceivedIncomingConnections = True + UISignalQueue.put(('setStatusIcon', 'green')) + UISignalQueue.put(( + 'updateNetworkStatusTab', (self.isOutbound, True, self.destination) + )) + self.antiIntersectionDelay(True) + self.fullyEstablished = True + # The connection having host suitable for knownnodes + if self.isOutbound or not self.local and not state.socksIP: + knownnodes.increaseRating(self.destination) + knownnodes.addKnownNode( + self.streams, self.destination, time.time()) + Dandelion().maybeAddStem(self) + self.sendAddr() + self.sendBigInv() + + def sendAddr(self): + """Send a partial list of known addresses to peer.""" + # We are going to share a maximum number of 1000 addrs (per overlapping + # stream) with our peer. 500 from overlapping streams, 250 from the + # left child stream, and 250 from the right child stream. 
+ maxAddrCount = BMConfigParser().safeGetInt( + "bitmessagesettings", "maxaddrperstreamsend", 500) + + templist = [] + addrs = {} + for stream in self.streams: + with knownnodes.knownNodesLock: + for n, s in enumerate((stream, stream * 2, stream * 2 + 1)): + nodes = knownnodes.knownNodes.get(s) + if not nodes: + continue + # only if more recent than 3 hours + # and having positive or neutral rating + filtered = [ + (k, v) for k, v in nodes.iteritems() + if v["lastseen"] > int(time.time()) + - maximumAgeOfNodesThatIAdvertiseToOthers + and v["rating"] >= 0 and len(k.host) <= 22 + ] + # sent 250 only if the remote isn't interested in it + elemCount = min( + len(filtered), + maxAddrCount / 2 if n else maxAddrCount) + addrs[s] = helper_random.randomsample(filtered, elemCount) + for substream in addrs: + for peer, params in addrs[substream]: + templist.append((substream, peer, params["lastseen"])) + if templist: + self.append_write_buf(assemble_addr(templist)) + + def sendBigInv(self): + """ + Send hashes of all inventory objects, chunked as the protocol has + a per-command limit. + """ + def sendChunk(): + """Send one chunk of inv entries in one command""" + if objectCount == 0: + return + logger.debug( + 'Sending huge inv message with %i objects to just this' + ' one peer', objectCount) + self.append_write_buf(protocol.CreatePacket( + 'inv', addresses.encodeVarint(objectCount) + payload)) + + # Select all hashes for objects in this stream. + bigInvList = {} + for stream in self.streams: + # may lock for a long time, but I think it's better than + # thousands of small locks + with self.objectsNewToThemLock: + for objHash in Inventory().unexpired_hashes_by_stream(stream): + # don't advertise stem objects on bigInv + if Dandelion().hasHash(objHash): + continue + bigInvList[objHash] = 0 + objectCount = 0 + payload = b'' + # Now let us start appending all of these hashes together. + # They will be sent out in a big inv message to our new peer. 
    def handle_close(self):
        """Callback for connection being closed."""
        # NB operator precedence: outbound, OR (non-local AND not via a
        # SOCKS proxy) -- same test as in set_connection_fully_established.
        host_is_global = self.isOutbound or not self.local and not state.socksIP
        if self.fullyEstablished:
            UISignalQueue.put((
                'updateNetworkStatusTab',
                (self.isOutbound, False, self.destination)
            ))
            if host_is_global:
                # Refresh the node's last-seen timestamp on a clean close.
                knownnodes.addKnownNode(
                    self.streams, self.destination, time.time())
                Dandelion().maybeRemoveStem(self)
        else:
            # Connection died before the handshake completed: maybe warn
            # about clock skew, and penalise the node's rating.
            self.checkTimeOffsetNotification()
            if host_is_global:
                knownnodes.decreaseRating(self.destination)
        BMProto.handle_close(self)
class Socks4aBMConnection(Socks4aConnection, TCPConnection):
    """SOCKS4a wrapper for TCP connections"""

    def __init__(self, address):
        # Set up the proxy side first; TCPConnection then reuses the
        # socket created by the SOCKS4a dispatcher.
        Socks4aConnection.__init__(self, address=address)
        TCPConnection.__init__(self, address=address, sock=self.socket)
        self.set_state("init")

    def state_proxy_handshake_done(self):
        """
        State when SOCKS4a connection succeeds, we need to send a
        Bitmessage handshake to peer.
        """
        Socks4aConnection.state_proxy_handshake_done(self)
        # Fresh random node id for this connection.
        self.nodeid = randomBytes(8)
        self.append_write_buf(
            protocol.assembleVersionMessage(
                self.destination.host, self.destination.port,
                connectionpool.BMConnectionPool().streams,
                False, nodeid=self.nodeid))
        # Expect the peer's reply starting with a message header.
        self.set_state("bm_header", expectBytes=protocol.Header.size)
        return True
+ self.set_state("close") + + def set_connection_fully_established(self): + """Only send addr here""" + # pylint: disable=attribute-defined-outside-init + self.fullyEstablished = True + self.sendAddr() + + def handle_close(self): + """ + After closing the connection switch knownnodes.knownNodesActual + back to False if the bootstrapper failed. + """ + BMProto.handle_close(self) + if not self._succeed: + knownnodes.knownNodesActual = False + + return Bootstrapper + + +class TCPServer(AdvancedDispatcher): + """TCP connection server for Bitmessage protocol""" + + def __init__(self, host='127.0.0.1', port=8444): + if not hasattr(self, '_map'): + AdvancedDispatcher.__init__(self) + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.set_reuse_addr() + for attempt in range(50): + try: + if attempt > 0: + logger.warning('Failed to bind on port %s', port) + port = random.randint(32767, 65535) + self.bind((host, port)) + except socket.error as e: + if e.errno in (asyncore.EADDRINUSE, asyncore.WSAEADDRINUSE): + continue + else: + if attempt > 0: + logger.warning('Setting port to %s', port) + BMConfigParser().set( + 'bitmessagesettings', 'port', str(port)) + BMConfigParser().save() + break + self.destination = Peer(host, port) + self.bound = True + self.listen(5) + + def is_bound(self): + """Is the socket bound?""" + try: + return self.bound + except AttributeError: + return False + + def handle_accept(self): + """Incoming connection callback""" + try: + sock = self.accept()[0] + except (TypeError, IndexError): + return + + state.ownAddresses[Peer(*sock.getsockname())] = True + if ( + len(connectionpool.BMConnectionPool()) + > BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxtotalconnections') + + BMConfigParser().safeGetInt( + 'bitmessagesettings', 'maxbootstrapconnections') + 10 + ): + # 10 is a sort of buffer, in between it will go through + # the version handshake and return an error to the peer + logger.warning("Server full, dropping connection") + 
class StoppableThread(threading.Thread):
    """Base class for application threads that can be asked to stop.

    Cooperating subclasses either poll the ``_stopped`` flag or wait on
    the ``stop`` event to notice a shutdown request.
    """

    name = None
    logger = logging.getLogger('default')

    def __init__(self, name=None):
        if name:
            self.name = name
        super(StoppableThread, self).__init__(name=self.name)
        self._stopped = False
        self.stop = threading.Event()
        random.seed()
        self.logger.info('Init thread %s', self.name)

    def stopThread(self):
        """Request the thread to stop (flag plus event)."""
        self._stopped = True
        self.stop.set()


class BusyError(threading.ThreadError):
    """Raised by `nonBlocking` when the lock is already held elsewhere."""
    pass


@contextmanager
def nonBlocking(lock):
    """Acquire *lock* without blocking for the duration of the block.

    Raises `BusyError` immediately if the lock cannot be acquired;
    otherwise the lock is always released on exit.
    """
    if not lock.acquire(False):
        raise BusyError
    try:
        yield
    finally:
        lock.release()
+""" +import logging +import os +import socket +import ssl +import sys + +import network.asyncore_pollchoose as asyncore +import paths +from network.advanceddispatcher import AdvancedDispatcher +from queues import receiveDataQueue + +logger = logging.getLogger('default') + +_DISCONNECTED_SSL = frozenset((ssl.SSL_ERROR_EOF,)) + +if sys.version_info >= (2, 7, 13): + # this means TLSv1 or higher + # in the future change to + # ssl.PROTOCOL_TLS1.2 + sslProtocolVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member +elif sys.version_info >= (2, 7, 9): + # this means any SSL/TLS. + # SSLv2 and 3 are excluded with an option after context is created + sslProtocolVersion = ssl.PROTOCOL_SSLv23 +else: + # this means TLSv1, there is no way to set "TLSv1 or higher" + # or "TLSv1.2" in < 2.7.9 + sslProtocolVersion = ssl.PROTOCOL_TLSv1 + + +# ciphers +if ( + ssl.OPENSSL_VERSION_NUMBER >= 0x10100000 + and not ssl.OPENSSL_VERSION.startswith(b"LibreSSL") +): + sslProtocolCiphers = "AECDH-AES256-SHA@SECLEVEL=0" +else: + sslProtocolCiphers = "AECDH-AES256-SHA" + + +class TLSDispatcher(AdvancedDispatcher): + """TLS functionality for classes derived from AdvancedDispatcher""" + # pylint: disable=too-many-instance-attributes, too-many-arguments + # pylint: disable=super-init-not-called + def __init__(self, _=None, sock=None, certfile=None, keyfile=None, + server_side=False, ciphers=sslProtocolCiphers): + self.want_read = self.want_write = True + self.certfile = certfile or os.path.join( + paths.codePath(), 'sslkeys', 'cert.pem') + self.keyfile = keyfile or os.path.join( + paths.codePath(), 'sslkeys', 'key.pem') + self.server_side = server_side + self.ciphers = ciphers + self.tlsStarted = False + self.tlsDone = False + self.tlsVersion = "N/A" + self.isSSL = False + + def state_tls_init(self): + """Prepare sockets for TLS handshake""" + self.isSSL = True + self.tlsStarted = True + # Once the connection has been established, + # it's safe to wrap the socket. 
+ if sys.version_info >= (2, 7, 9): + context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH + if self.server_side else ssl.Purpose.CLIENT_AUTH) + context.set_ciphers(self.ciphers) + context.set_ecdh_curve("secp256k1") + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + # also exclude TLSv1 and TLSv1.1 in the future + context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 |\ + ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE |\ + ssl.OP_CIPHER_SERVER_PREFERENCE + self.sslSocket = context.wrap_socket( + self.socket, server_side=self.server_side, + do_handshake_on_connect=False) + else: + self.sslSocket = ssl.wrap_socket( + self.socket, server_side=self.server_side, + ssl_version=sslProtocolVersion, + certfile=self.certfile, keyfile=self.keyfile, + ciphers=self.ciphers, do_handshake_on_connect=False) + self.sslSocket.setblocking(0) + self.want_read = self.want_write = True + self.set_state("tls_handshake") + return False + + @staticmethod + def state_tls_handshake(): + """ + Do nothing while TLS handshake is pending, as during this phase + we need to react to callbacks instead + """ + return False + + def writable(self): + """Handle writable checks for TLS-enabled sockets""" + try: + if self.tlsStarted and not self.tlsDone and not self.write_buf: + return self.want_write + except AttributeError: + pass + return AdvancedDispatcher.writable(self) + + def readable(self): + """Handle readable check for TLS-enabled sockets""" + try: + # during TLS handshake, and after flushing write buffer, + # return status of last handshake attempt + if self.tlsStarted and not self.tlsDone and not self.write_buf: + logger.debug('tls readable, %r', self.want_read) + return self.want_read + # prior to TLS handshake, + # receiveDataThread should emulate synchronous behaviour + if not self.fullyEstablished and ( + self.expectBytes == 0 or not self.write_buf_empty()): + return False + except AttributeError: + pass + return AdvancedDispatcher.readable(self) + + def 
    def handle_write(self):
        """
        Handle writes for sockets during TLS handshake. Requires special
        treatment as during the handshake, buffers must remain empty
        and normal writes must be ignored.
        """
        try:
            # wait for write buffer flush; while the handshake is pending
            # writable events drive tls_handshake() instead of real writes
            if self.tlsStarted and not self.tlsDone and not self.write_buf:
                self.tls_handshake()
            else:
                AdvancedDispatcher.handle_write(self)
        except AttributeError:
            # NOTE(review): presumably hit when TLS attributes are not yet
            # initialised on this object -- falls back to a plain write;
            # confirm against AdvancedDispatcher/TLSDispatcher init order.
            AdvancedDispatcher.handle_write(self)
        except ssl.SSLError as err:
            if err.errno == ssl.SSL_ERROR_WANT_WRITE:
                # Handshake needs another writable event; retry later.
                return
            if err.errno not in _DISCONNECTED_SSL:
                logger.info("SSL Error: %s", err)
            self.close_reason = "SSL Error in handle_write"
            self.handle_close()
+ try: + logger.debug("handshaking (internal)") + self.sslSocket.do_handshake() + except ssl.SSLError as err: + self.close_reason = "SSL Error in tls_handshake" + logger.info("%s:%i: handshake fail", *self.destination) + self.want_read = self.want_write = False + if err.args[0] == ssl.SSL_ERROR_WANT_READ: + logger.debug("want read") + self.want_read = True + if err.args[0] == ssl.SSL_ERROR_WANT_WRITE: + logger.debug("want write") + self.want_write = True + if not (self.want_write or self.want_read): + raise + except socket.error as err: + # pylint: disable=protected-access + if err.errno in asyncore._DISCONNECTED: + self.close_reason = "socket.error in tls_handshake" + self.handle_close() + else: + raise + else: + if sys.version_info >= (2, 7, 9): + self.tlsVersion = self.sslSocket.version() + logger.debug( + '%s:%i: TLS handshake success, TLS protocol version: %s', + self.destination.host, self.destination.port, + self.tlsVersion) + else: + self.tlsVersion = "TLSv1" + logger.debug( + '%s:%i: TLS handshake success', + self.destination.host, self.destination.port) + # The handshake has completed, so remove this channel and... 
+ self.del_channel() + self.set_socket(self.sslSocket) + self.tlsDone = True + + self.bm_proto_reset() + self.set_state("connection_fully_established") + receiveDataQueue.put(self.destination) + return False diff --git a/src/tests/mock/pybitmessage/network/udp.py b/src/tests/mock/pybitmessage/network/udp.py new file mode 100644 index 00000000..3f999332 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/udp.py @@ -0,0 +1,147 @@ +""" +UDP protocol handler +""" +import logging +import socket +import time + +import protocol +import state +from bmproto import BMProto +from constants import MAX_TIME_OFFSET +from node import Peer +from objectracker import ObjectTracker +from queues import receiveDataQueue + +logger = logging.getLogger('default') + + +class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes + """Bitmessage protocol over UDP (class)""" + port = 8444 + + def __init__(self, host=None, sock=None, announcing=False): + # pylint: disable=bad-super-call + super(BMProto, self).__init__(sock=sock) + self.verackReceived = True + self.verackSent = True + # .. 
todo:: sort out streams + self.streams = [1] + self.fullyEstablished = True + self.skipUntil = 0 + if sock is None: + if host is None: + host = '' + self.create_socket( + socket.AF_INET6 if ":" in host else socket.AF_INET, + socket.SOCK_DGRAM + ) + self.set_socket_reuse() + logger.info("Binding UDP socket to %s:%i", host, self.port) + self.socket.bind((host, self.port)) + else: + self.socket = sock + self.set_socket_reuse() + self.listening = Peer(*self.socket.getsockname()) + self.destination = Peer(*self.socket.getsockname()) + ObjectTracker.__init__(self) + self.connecting = False + self.connected = True + self.announcing = announcing + self.set_state("bm_header", expectBytes=protocol.Header.size) + + def set_socket_reuse(self): + """Set socket reuse option""" + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except AttributeError: + pass + + # disable most commands before doing research / testing + # only addr (peer discovery), error and object are implemented + + def bm_command_getdata(self): + # return BMProto.bm_command_getdata(self) + return True + + def bm_command_inv(self): + # return BMProto.bm_command_inv(self) + return True + + def bm_command_addr(self): + addresses = self._decode_addr() + # only allow peer discovery from private IPs in order to avoid + # attacks from random IPs on the internet + if not self.local: + return True + remoteport = False + for seenTime, stream, _, ip, port in addresses: + decodedIP = protocol.checkIPAddress(str(ip)) + if stream not in state.streamsInWhichIAmParticipating: + continue + if (seenTime < time.time() - MAX_TIME_OFFSET + or seenTime > time.time() + MAX_TIME_OFFSET): + continue + if decodedIP is False: + # if the address isn't local, interpret it as + # the host's own announcement + remoteport = port + if remoteport is False: + return True + logger.debug( 
+ "received peer discovery from %s:%i (port %i):", + self.destination.host, self.destination.port, remoteport) + state.discoveredPeers[Peer(self.destination.host, remoteport)] = \ + time.time() + return True + + def bm_command_portcheck(self): + return True + + def bm_command_ping(self): + return True + + def bm_command_pong(self): + return True + + def bm_command_verack(self): + return True + + def bm_command_version(self): + return True + + def handle_connect(self): + return + + def writable(self): + return self.write_buf + + def readable(self): + return len(self.read_buf) < self._buf_len + + def handle_read(self): + try: + recdata, addr = self.socket.recvfrom(self._buf_len) + except socket.error: + logger.error("socket error on recvfrom:", exc_info=True) + return + + self.destination = Peer(*addr) + encodedAddr = protocol.encodeHost(addr[0]) + self.local = bool(protocol.checkIPAddress(encodedAddr, True)) + # overwrite the old buffer to avoid mixing data and so that + # self.local works correctly + self.read_buf[0:] = recdata + self.bm_proto_reset() + receiveDataQueue.put(self.listening) + + def handle_write(self): + try: + retval = self.socket.sendto( + self.write_buf, ('', self.port)) + except socket.error: + logger.error("socket error on sendto:", exc_info=True) + retval = len(self.write_buf) + self.slice_write_buf(retval) diff --git a/src/tests/mock/pybitmessage/network/uploadthread.py b/src/tests/mock/pybitmessage/network/uploadthread.py new file mode 100644 index 00000000..7d80d789 --- /dev/null +++ b/src/tests/mock/pybitmessage/network/uploadthread.py @@ -0,0 +1,69 @@ +""" +`UploadThread` class definition +""" +import time + +import helper_random +import protocol +from inventory import Inventory +from network.connectionpool import BMConnectionPool +from network.dandelion import Dandelion +from randomtrackingdict import RandomTrackingDict +from threads import StoppableThread + + +class UploadThread(StoppableThread): + """ + This is a thread that uploads the 
    def run(self):
        """Repeatedly upload requested objects to connected peers.

        Each pass visits established connections in random order, packs
        up to ``RandomTrackingDict.maxPending`` of the objects queued in
        each peer's ``pendingUpload`` (presumably populated from the
        peer's ``getdata`` requests -- confirm against the protocol
        handler) into ``object`` packets, and appends them to the peer's
        write buffer.  Waits up to a second when nothing was uploaded.
        """
        while not self._stopped:
            uploaded = 0
            # Choose uploading peers randomly
            connections = BMConnectionPool().establishedConnections()
            helper_random.randomshuffle(connections)
            for i in connections:
                now = time.time()
                # avoid unnecessary delay
                if i.skipUntil >= now:
                    continue
                # Don't grow an already-large write buffer further.
                if len(i.write_buf) > self.maxBufSize:
                    continue
                try:
                    request = i.pendingUpload.randomKeys(
                        RandomTrackingDict.maxPending)
                except KeyError:
                    # Nothing pending for this peer.
                    continue
                payload = bytearray()
                chunk_count = 0
                for chunk in request:
                    del i.pendingUpload[chunk]
                    # Requesting a stem object we never advertised to this
                    # peer looks like probing -- trigger the delay defense.
                    if Dandelion().hasHash(chunk) and \
                       i != Dandelion().objectChildStem(chunk):
                        i.antiIntersectionDelay()
                        self.logger.info(
                            '%s asked for a stem object we didn\'t offer to it.',
                            i.destination)
                        break
                    try:
                        payload.extend(protocol.CreatePacket(
                            'object', Inventory()[chunk].payload))
                        chunk_count += 1
                    except KeyError:
                        # Same defense when the object is simply unknown.
                        i.antiIntersectionDelay()
                        self.logger.info(
                            '%s asked for an object we don\'t have.',
                            i.destination)
                        break
                if not chunk_count:
                    continue
                i.append_write_buf(payload)
                self.logger.debug(
                    '%s:%i Uploading %i objects',
                    i.destination.host, i.destination.port, chunk_count)
                uploaded += chunk_count
            if not uploaded:
                self.stop.wait(1)
def initCL():
    """Initialise the OpenCL engine.

    Scans all OpenCL platforms for GPU devices, enables those whose
    vendor matches the ``bitmessagesettings/opencl`` config value and,
    if any were enabled, builds the ``bitmsghash.cl`` kernel.  On any
    failure the enabled-GPU list is cleared so OpenCL PoW stays off.
    """
    global ctx, queue, program, hash_dt  # pylint: disable=global-statement
    if libAvailable is False:
        # pyopencl/numpy failed to import; OpenCL cannot be used at all.
        return
    # Reset module-level state in place so other importers see the change.
    del enabledGpus[:]
    del vendors[:]
    del gpus[:]
    ctx = False
    try:
        hash_dt = numpy.dtype(
            [('target', numpy.uint64), ('v', numpy.str_, 73)])
        try:
            for platform in cl.get_platforms():
                gpus.extend(
                    platform.get_devices(device_type=cl.device_type.GPU))
                if BMConfigParser().safeGet(
                        "bitmessagesettings", "opencl") == platform.vendor:
                    enabledGpus.extend(platform.get_devices(
                        device_type=cl.device_type.GPU))
                if platform.vendor not in vendors:
                    vendors.append(platform.vendor)
        except Exception:
            # Best-effort platform scan (the original used a bare
            # ``except:``; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed).
            pass
        if enabledGpus:
            ctx = cl.Context(devices=enabledGpus)
            queue = cl.CommandQueue(ctx)
            # Context manager guarantees the kernel source file is closed
            # (the previous code leaked the file handle).
            with open(os.path.join(
                    paths.codePath(), "bitmsghash",
                    'bitmsghash.cl'), 'r') as kernel_file:
                fstr = kernel_file.read()
            program = cl.Program(ctx, fstr).build(options="")
            logger.info("Loaded OpenCL kernel")
        else:
            logger.info("No OpenCL GPUs found")
            del enabledGpus[:]
    except Exception:
        logger.error("OpenCL fail: ", exc_info=True)
        del enabledGpus[:]
class CipherName(object):
    """Descriptor for an OpenSSL cipher: name, pointer getter, blocksize."""

    def __init__(self, name, pointer, blocksize):
        self._name = name
        self._pointer = pointer
        self._blocksize = blocksize

    def get_name(self):
        """Return the cipher's name."""
        return self._name

    def get_blocksize(self):
        """Return the cipher's block size."""
        return self._blocksize

    def get_pointer(self):
        """Call the stored factory and return the cipher pointer."""
        return self._pointer()

    def __str__(self):
        parts = (
            "Cipher : " + self._name,
            "Blocksize : " + str(self._blocksize),
            "Function pointer : " + str(self._pointer),
        )
        return " | ".join(parts)
NotImplementedError('Cannot determine version of this OpenSSL library.') + pass + return (version, hexversion, cflags) + + +class _OpenSSL(object): + """ + Wrapper for OpenSSL using ctypes + """ + # pylint: disable=too-many-statements, too-many-instance-attributes + def __init__(self, library): + """ + Build the wrapper + """ + self._lib = ctypes.CDLL(library) + self._version, self._hexversion, self._cflags = get_version(self._lib) + self._libreSSL = self._version.startswith(b"LibreSSL") + + self.pointer = ctypes.pointer + self.c_int = ctypes.c_int + self.byref = ctypes.byref + self.create_string_buffer = ctypes.create_string_buffer + + self.BN_new = self._lib.BN_new + self.BN_new.restype = ctypes.c_void_p + self.BN_new.argtypes = [] + + self.BN_free = self._lib.BN_free + self.BN_free.restype = None + self.BN_free.argtypes = [ctypes.c_void_p] + + self.BN_clear_free = self._lib.BN_clear_free + self.BN_clear_free.restype = None + self.BN_clear_free.argtypes = [ctypes.c_void_p] + + self.BN_num_bits = self._lib.BN_num_bits + self.BN_num_bits.restype = ctypes.c_int + self.BN_num_bits.argtypes = [ctypes.c_void_p] + + self.BN_bn2bin = self._lib.BN_bn2bin + self.BN_bn2bin.restype = ctypes.c_int + self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + try: + self.BN_bn2binpad = self._lib.BN_bn2binpad + self.BN_bn2binpad.restype = ctypes.c_int + self.BN_bn2binpad.argtypes = [ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int] + except AttributeError: + # optional, we have a workaround + pass + + self.BN_bin2bn = self._lib.BN_bin2bn + self.BN_bin2bn.restype = ctypes.c_void_p + self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p] + + self.EC_KEY_free = self._lib.EC_KEY_free + self.EC_KEY_free.restype = None + self.EC_KEY_free.argtypes = [ctypes.c_void_p] + + self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name + self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p + self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] + + 
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key + self.EC_KEY_generate_key.restype = ctypes.c_int + self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_check_key = self._lib.EC_KEY_check_key + self.EC_KEY_check_key.restype = ctypes.c_int + self.EC_KEY_check_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key + self.EC_KEY_get0_private_key.restype = ctypes.c_void_p + self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key + self.EC_KEY_get0_public_key.restype = ctypes.c_void_p + self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group + self.EC_KEY_get0_group.restype = ctypes.c_void_p + self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] + + self.EC_POINT_get_affine_coordinates_GFp = \ + self._lib.EC_POINT_get_affine_coordinates_GFp + self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int + self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_get_affine_coordinates = \ + self._lib.EC_POINT_get_affine_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_get_affine_coordinates = \ + self._lib.EC_POINT_get_affine_coordinates_GF2m + self.EC_POINT_get_affine_coordinates.restype = ctypes.c_int + self.EC_POINT_get_affine_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key + self.EC_KEY_set_private_key.restype = ctypes.c_int + self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key + self.EC_KEY_set_public_key.restype = ctypes.c_int + self.EC_KEY_set_public_key.argtypes = 
[ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_group = self._lib.EC_KEY_set_group + self.EC_KEY_set_group.restype = ctypes.c_int + self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_set_affine_coordinates_GFp = \ + self._lib.EC_POINT_set_affine_coordinates_GFp + self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int + self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_set_affine_coordinates = \ + self._lib.EC_POINT_set_affine_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_set_affine_coordinates = \ + self._lib.EC_POINT_set_affine_coordinates_GF2m + self.EC_POINT_set_affine_coordinates.restype = ctypes.c_int + self.EC_POINT_set_affine_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_set_compressed_coordinates = \ + self._lib.EC_POINT_set_compressed_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_set_compressed_coordinates = \ + self._lib.EC_POINT_set_compressed_coordinates_GF2m + self.EC_POINT_set_compressed_coordinates.restype = ctypes.c_int + self.EC_POINT_set_compressed_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p] + + self.EC_POINT_new = self._lib.EC_POINT_new + self.EC_POINT_new.restype = ctypes.c_void_p + self.EC_POINT_new.argtypes = [ctypes.c_void_p] + + self.EC_POINT_free = self._lib.EC_POINT_free + self.EC_POINT_free.restype = None + self.EC_POINT_free.argtypes = [ctypes.c_void_p] + + self.BN_CTX_free = self._lib.BN_CTX_free + self.BN_CTX_free.restype = None + self.BN_CTX_free.argtypes = [ctypes.c_void_p] + + self.EC_POINT_mul = self._lib.EC_POINT_mul + self.EC_POINT_mul.restype = None + 
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key + self.EC_KEY_set_private_key.restype = ctypes.c_int + self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EC_KEY_OpenSSL = self._lib.EC_KEY_OpenSSL + self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p + self._lib.EC_KEY_OpenSSL.argtypes = [] + + self.EC_KEY_set_method = self._lib.EC_KEY_set_method + self._lib.EC_KEY_set_method.restype = ctypes.c_int + self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + else: + self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL + self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p + self._lib.ECDH_OpenSSL.argtypes = [] + + self.ECDH_set_method = self._lib.ECDH_set_method + self._lib.ECDH_set_method.restype = ctypes.c_int + self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.ECDH_compute_key = self._lib.ECDH_compute_key + self.ECDH_compute_key.restype = ctypes.c_int + self.ECDH_compute_key.argtypes = [ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex + self.EVP_CipherInit_ex.restype = ctypes.c_int + self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new + self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p + self.EVP_CIPHER_CTX_new.argtypes = [] + + # Cipher + self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128 + self.EVP_aes_128_cfb128.restype = ctypes.c_void_p + self.EVP_aes_128_cfb128.argtypes = [] + + self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128 + self.EVP_aes_256_cfb128.restype = ctypes.c_void_p + self.EVP_aes_256_cfb128.argtypes = [] + + self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc + self.EVP_aes_128_cbc.restype = 
ctypes.c_void_p + self.EVP_aes_128_cbc.argtypes = [] + + self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc + self.EVP_aes_256_cbc.restype = ctypes.c_void_p + self.EVP_aes_256_cbc.argtypes = [] + + # self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr + # self.EVP_aes_128_ctr.restype = ctypes.c_void_p + # self.EVP_aes_128_ctr.argtypes = [] + + # self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr + # self.EVP_aes_256_ctr.restype = ctypes.c_void_p + # self.EVP_aes_256_ctr.argtypes = [] + + self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb + self.EVP_aes_128_ofb.restype = ctypes.c_void_p + self.EVP_aes_128_ofb.argtypes = [] + + self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb + self.EVP_aes_256_ofb.restype = ctypes.c_void_p + self.EVP_aes_256_ofb.argtypes = [] + + self.EVP_bf_cbc = self._lib.EVP_bf_cbc + self.EVP_bf_cbc.restype = ctypes.c_void_p + self.EVP_bf_cbc.argtypes = [] + + self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64 + self.EVP_bf_cfb64.restype = ctypes.c_void_p + self.EVP_bf_cfb64.argtypes = [] + + self.EVP_rc4 = self._lib.EVP_rc4 + self.EVP_rc4.restype = ctypes.c_void_p + self.EVP_rc4.argtypes = [] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EVP_CIPHER_CTX_reset = self._lib.EVP_CIPHER_CTX_reset + self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int + self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p] + else: + self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup + self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int + self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p] + + self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free + self.EVP_CIPHER_CTX_free.restype = None + self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p] + + self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate + self.EVP_CipherUpdate.restype = ctypes.c_int + self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_int] + + self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex + 
self.EVP_CipherFinal_ex.restype = ctypes.c_int + self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestInit = self._lib.EVP_DigestInit + self.EVP_DigestInit.restype = ctypes.c_int + self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex + self.EVP_DigestInit_ex.restype = ctypes.c_int + self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p] + + self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate + self.EVP_DigestUpdate.restype = ctypes.c_int + self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_int] + + self.EVP_DigestFinal = self._lib.EVP_DigestFinal + self.EVP_DigestFinal.restype = ctypes.c_int + self.EVP_DigestFinal.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex + self.EVP_DigestFinal_ex.restype = ctypes.c_int + self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.ECDSA_sign = self._lib.ECDSA_sign + self.ECDSA_sign.restype = ctypes.c_int + self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.ECDSA_verify = self._lib.ECDSA_verify + self.ECDSA_verify.restype = ctypes.c_int + self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EVP_MD_CTX_new = self._lib.EVP_MD_CTX_new + self.EVP_MD_CTX_new.restype = ctypes.c_void_p + self.EVP_MD_CTX_new.argtypes = [] + + self.EVP_MD_CTX_reset = self._lib.EVP_MD_CTX_reset + self.EVP_MD_CTX_reset.restype = None + self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p] + + self.EVP_MD_CTX_free = self._lib.EVP_MD_CTX_free + self.EVP_MD_CTX_free.restype = None + self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p] + + self.EVP_sha1 = 
self._lib.EVP_sha1 + self.EVP_sha1.restype = ctypes.c_void_p + self.EVP_sha1.argtypes = [] + + self.digest_ecdsa_sha1 = self.EVP_sha1 + else: + self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create + self.EVP_MD_CTX_create.restype = ctypes.c_void_p + self.EVP_MD_CTX_create.argtypes = [] + + self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init + self.EVP_MD_CTX_init.restype = None + self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p] + + self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy + self.EVP_MD_CTX_destroy.restype = None + self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p] + + self.EVP_ecdsa = self._lib.EVP_ecdsa + self._lib.EVP_ecdsa.restype = ctypes.c_void_p + self._lib.EVP_ecdsa.argtypes = [] + + self.digest_ecdsa_sha1 = self.EVP_ecdsa + + self.RAND_bytes = self._lib.RAND_bytes + self.RAND_bytes.restype = ctypes.c_int + self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int] + + self.EVP_sha256 = self._lib.EVP_sha256 + self.EVP_sha256.restype = ctypes.c_void_p + self.EVP_sha256.argtypes = [] + + self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey + self.i2o_ECPublicKey.restype = ctypes.c_void_p + self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_sha512 = self._lib.EVP_sha512 + self.EVP_sha512.restype = ctypes.c_void_p + self.EVP_sha512.argtypes = [] + + self.HMAC = self._lib.HMAC + self.HMAC.restype = ctypes.c_void_p + self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p] + + try: + self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC + except Exception: + # The above is not compatible with all versions of OSX. 
+ self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1 + + self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int + self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p] + + # Blind signature requirements + self.BN_CTX_new = self._lib.BN_CTX_new + self.BN_CTX_new.restype = ctypes.c_void_p + self.BN_CTX_new.argtypes = [] + + self.BN_dup = self._lib.BN_dup + self.BN_dup.restype = ctypes.c_void_p + self.BN_dup.argtypes = [ctypes.c_void_p] + + self.BN_rand = self._lib.BN_rand + self.BN_rand.restype = ctypes.c_int + self.BN_rand.argtypes = [ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int] + + self.BN_set_word = self._lib.BN_set_word + self.BN_set_word.restype = ctypes.c_int + self.BN_set_word.argtypes = [ctypes.c_void_p, + ctypes.c_ulong] + + self.BN_mul = self._lib.BN_mul + self.BN_mul.restype = ctypes.c_int + self.BN_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_add = self._lib.BN_mod_add + self.BN_mod_add.restype = ctypes.c_int + self.BN_mod_add.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_inverse = self._lib.BN_mod_inverse + self.BN_mod_inverse.restype = ctypes.c_void_p + self.BN_mod_inverse.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_mul = self._lib.BN_mod_mul + self.BN_mod_mul.restype = ctypes.c_int + self.BN_mod_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_lshift = self._lib.BN_lshift + self.BN_lshift.restype = ctypes.c_int + self.BN_lshift.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_int] + + self.BN_sub_word = self._lib.BN_sub_word + self.BN_sub_word.restype = ctypes.c_int + self.BN_sub_word.argtypes = [ctypes.c_void_p, + ctypes.c_ulong] + + self.BN_cmp = self._lib.BN_cmp + 
self.BN_cmp.restype = ctypes.c_int + self.BN_cmp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.BN_is_odd = self._lib.BN_is_odd + self.BN_is_odd.restype = ctypes.c_int + self.BN_is_odd.argtypes = [ctypes.c_void_p] + except AttributeError: + # OpenSSL 1.1.0 implements this as a function, but earlier + # versions as macro, so we need to workaround + self.BN_is_odd = self.BN_is_odd_compatible + + self.BN_bn2dec = self._lib.BN_bn2dec + self.BN_bn2dec.restype = ctypes.c_char_p + self.BN_bn2dec.argtypes = [ctypes.c_void_p] + + self.EC_GROUP_new_by_curve_name = self._lib.EC_GROUP_new_by_curve_name + self.EC_GROUP_new_by_curve_name.restype = ctypes.c_void_p + self.EC_GROUP_new_by_curve_name.argtypes = [ctypes.c_int] + + self.EC_GROUP_get_order = self._lib.EC_GROUP_get_order + self.EC_GROUP_get_order.restype = ctypes.c_int + self.EC_GROUP_get_order.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_GROUP_get_cofactor = self._lib.EC_GROUP_get_cofactor + self.EC_GROUP_get_cofactor.restype = ctypes.c_int + self.EC_GROUP_get_cofactor.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_GROUP_get0_generator = self._lib.EC_GROUP_get0_generator + self.EC_GROUP_get0_generator.restype = ctypes.c_void_p + self.EC_GROUP_get0_generator.argtypes = [ctypes.c_void_p] + + self.EC_POINT_copy = self._lib.EC_POINT_copy + self.EC_POINT_copy.restype = ctypes.c_int + self.EC_POINT_copy.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_add = self._lib.EC_POINT_add + self.EC_POINT_add.restype = ctypes.c_int + self.EC_POINT_add.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_cmp = self._lib.EC_POINT_cmp + self.EC_POINT_cmp.restype = ctypes.c_int + self.EC_POINT_cmp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_set_to_infinity = self._lib.EC_POINT_set_to_infinity + 
self.EC_POINT_set_to_infinity.restype = ctypes.c_int + self.EC_POINT_set_to_infinity.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self._set_ciphers() + self._set_curves() + + def _set_ciphers(self): + self.cipher_algo = { + 'aes-128-cbc': CipherName( + 'aes-128-cbc', self.EVP_aes_128_cbc, 16), + 'aes-256-cbc': CipherName( + 'aes-256-cbc', self.EVP_aes_256_cbc, 16), + 'aes-128-cfb': CipherName( + 'aes-128-cfb', self.EVP_aes_128_cfb128, 16), + 'aes-256-cfb': CipherName( + 'aes-256-cfb', self.EVP_aes_256_cfb128, 16), + 'aes-128-ofb': CipherName( + 'aes-128-ofb', self._lib.EVP_aes_128_ofb, 16), + 'aes-256-ofb': CipherName( + 'aes-256-ofb', self._lib.EVP_aes_256_ofb, 16), + # 'aes-128-ctr': CipherName( + # 'aes-128-ctr', self._lib.EVP_aes_128_ctr, 16), + # 'aes-256-ctr': CipherName( + # 'aes-256-ctr', self._lib.EVP_aes_256_ctr, 16), + 'bf-cfb': CipherName( + 'bf-cfb', self.EVP_bf_cfb64, 8), + 'bf-cbc': CipherName( + 'bf-cbc', self.EVP_bf_cbc, 8), + # 128 is the initialisation size not block size + 'rc4': CipherName( + 'rc4', self.EVP_rc4, 128), + } + + def _set_curves(self): + self.curves = { + 'secp112r1': 704, + 'secp112r2': 705, + 'secp128r1': 706, + 'secp128r2': 707, + 'secp160k1': 708, + 'secp160r1': 709, + 'secp160r2': 710, + 'secp192k1': 711, + 'secp224k1': 712, + 'secp224r1': 713, + 'secp256k1': 714, + 'secp384r1': 715, + 'secp521r1': 716, + 'sect113r1': 717, + 'sect113r2': 718, + 'sect131r1': 719, + 'sect131r2': 720, + 'sect163k1': 721, + 'sect163r1': 722, + 'sect163r2': 723, + 'sect193r1': 724, + 'sect193r2': 725, + 'sect233k1': 726, + 'sect233r1': 727, + 'sect239k1': 728, + 'sect283k1': 729, + 'sect283r1': 730, + 'sect409k1': 731, + 'sect409r1': 732, + 'sect571k1': 733, + 'sect571r1': 734, + } + + def BN_num_bytes(self, x): + """ + returns the length of a BN (OpenSSl API) + """ + return int((self.BN_num_bits(x) + 7) / 8) + + def BN_is_odd_compatible(self, x): + """ + returns if BN is odd + we assume big endianness, and that BN is initialised + """ + 
length = self.BN_num_bytes(x) + data = self.malloc(0, length) + OpenSSL.BN_bn2bin(x, data) + return ord(data[length - 1]) & 1 + + def get_cipher(self, name): + """ + returns the OpenSSL cipher instance + """ + if name not in self.cipher_algo: + raise Exception("Unknown cipher") + return self.cipher_algo[name] + + def get_curve(self, name): + """ + returns the id of a elliptic curve + """ + if name not in self.curves: + raise Exception("Unknown curve") + return self.curves[name] + + def get_curve_by_id(self, id_): + """ + returns the name of a elliptic curve with his id + """ + res = None + for i in self.curves: + if self.curves[i] == id_: + res = i + break + if res is None: + raise Exception("Unknown curve") + return res + + def rand(self, size): + """ + OpenSSL random function + """ + buffer_ = self.malloc(0, size) + # This pyelliptic library, by default, didn't check the return value + # of RAND_bytes. It is evidently possible that it returned an error + # and not-actually-random data. However, in tests on various + # operating systems, while generating hundreds of gigabytes of random + # strings of various sizes I could not get an error to occur. + # Also Bitcoin doesn't check the return value of RAND_bytes either. 
+ # Fixed in Bitmessage version 0.4.2 (in source code on 2013-10-13) + while self.RAND_bytes(buffer_, size) != 1: + import time + time.sleep(1) + return buffer_.raw + + def malloc(self, data, size): + """ + returns a create_string_buffer (ctypes) + """ + buffer_ = None + if data != 0: + if sys.version_info.major == 3 and isinstance(data, type('')): + data = data.encode() + buffer_ = self.create_string_buffer(data, size) + else: + buffer_ = self.create_string_buffer(size) + return buffer_ + + +def loadOpenSSL(): + """This function finds and load the OpenSSL library""" + # pylint: disable=global-statement + global OpenSSL + from os import path, environ + from ctypes.util import find_library + + libdir = [] + if getattr(sys, 'frozen', None): + if 'darwin' in sys.platform: + libdir.extend([ + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.1.0.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.2.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.1.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.0.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.0.9.8.dylib'), + ]) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append(path.join(sys._MEIPASS, 'libeay32.dll')) + else: + libdir.extend([ + path.join(sys._MEIPASS, 'libcrypto.so'), + path.join(sys._MEIPASS, 'libssl.so'), + path.join(sys._MEIPASS, 'libcrypto.so.1.1.0'), + path.join(sys._MEIPASS, 'libssl.so.1.1.0'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.2'), + path.join(sys._MEIPASS, 'libssl.so.1.0.2'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.1'), + path.join(sys._MEIPASS, 'libssl.so.1.0.1'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.0'), + path.join(sys._MEIPASS, 'libssl.so.1.0.0'), + path.join(sys._MEIPASS, 'libcrypto.so.0.9.8'), + 
path.join(sys._MEIPASS, 'libssl.so.0.9.8'), + ]) + if 'darwin' in sys.platform: + libdir.extend([ + 'libcrypto.dylib', '/usr/local/opt/openssl/lib/libcrypto.dylib']) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append('libeay32.dll') + else: + libdir.append('libcrypto.so') + libdir.append('libssl.so') + libdir.append('libcrypto.so.1.0.0') + libdir.append('libssl.so.1.0.0') + if 'linux' in sys.platform or 'darwin' in sys.platform \ + or 'bsd' in sys.platform: + libdir.append(find_library('ssl')) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append(find_library('libeay32')) + for library in libdir: + try: + OpenSSL = _OpenSSL(library) + return + except Exception: + pass + raise Exception( + "Couldn't find and load the OpenSSL library. You must install it.") + + +loadOpenSSL() diff --git a/src/tests/mock/pybitmessage/pathmagic.py b/src/tests/mock/pybitmessage/pathmagic.py new file mode 100644 index 00000000..3f32c0c1 --- /dev/null +++ b/src/tests/mock/pybitmessage/pathmagic.py @@ -0,0 +1,10 @@ +import os +import sys + + +def setup(): + """Add path to this file to sys.path""" + app_dir = os.path.dirname(os.path.abspath(__file__)) + os.chdir(app_dir) + sys.path.insert(0, app_dir) + return app_dir diff --git a/src/tests/mock/pybitmessage/paths.py b/src/tests/mock/pybitmessage/paths.py new file mode 100644 index 00000000..e2f8c97e --- /dev/null +++ b/src/tests/mock/pybitmessage/paths.py @@ -0,0 +1,131 @@ +""" +Path related functions +""" +import logging +import os +import re +import sys +from datetime import datetime +from shutil import move + +logger = logging.getLogger('default') + +# When using py2exe or py2app, the variable frozen is added to the sys +# namespace. This can be used to setup a different code path for +# binary distributions vs source distributions. 
+frozen = getattr(sys, 'frozen', None) + + +def lookupExeFolder(): + """Returns executable folder path""" + if frozen: + exeFolder = ( + # targetdir/Bitmessage.app/Contents/MacOS/Bitmessage + os.path.dirname(sys.executable).split(os.path.sep)[0] + os.path.sep + if frozen == "macosx_app" else + os.path.dirname(sys.executable) + os.path.sep) + elif __file__: + exeFolder = os.path.dirname(__file__) + os.path.sep + else: + exeFolder = '' + return exeFolder + + +def lookupAppdataFolder(): + """Returns path of the folder where application data is stored""" + APPNAME = "PyBitmessage" + dataFolder = os.environ.get('BITMESSAGE_HOME') + if dataFolder: + if dataFolder[-1] not in (os.path.sep, os.path.altsep): + dataFolder += os.path.sep + elif sys.platform == 'darwin': + try: + dataFolder = os.path.join( + os.environ['HOME'], + 'Library/Application Support/', APPNAME + ) + '/' + + except KeyError: + sys.exit( + 'Could not find home folder, please report this message' + ' and your OS X version to the BitMessage Github.') + elif 'win32' in sys.platform or 'win64' in sys.platform: + dataFolder = os.path.join( + os.environ['APPDATA'].decode( + sys.getfilesystemencoding(), 'ignore'), APPNAME + ) + os.path.sep + else: + try: + dataFolder = os.path.join(os.environ['XDG_CONFIG_HOME'], APPNAME) + except KeyError: + dataFolder = os.path.join(os.environ['HOME'], '.config', APPNAME) + + # Migrate existing data to the proper location + # if this is an existing install + try: + move(os.path.join(os.environ['HOME'], '.%s' % APPNAME), dataFolder) + logger.info('Moving data folder to %s', dataFolder) + except IOError: + # Old directory may not exist. 
+ pass + dataFolder = dataFolder + os.path.sep + return dataFolder + + +def codePath(): + """Returns path to the program sources""" + if not frozen: + return os.path.dirname(__file__) + return ( + os.environ.get('RESOURCEPATH') + # pylint: disable=protected-access + if frozen == "macosx_app" else sys._MEIPASS) + + +def tail(f, lines=20): + """Returns last lines in the f file object""" + total_lines_wanted = lines + + BLOCK_SIZE = 1024 + f.seek(0, 2) + block_end_byte = f.tell() + lines_to_go = total_lines_wanted + block_number = -1 + # blocks of size BLOCK_SIZE, in reverse order starting + # from the end of the file + blocks = [] + while lines_to_go > 0 and block_end_byte > 0: + if block_end_byte - BLOCK_SIZE > 0: + # read the last block we haven't yet read + f.seek(block_number * BLOCK_SIZE, 2) + blocks.append(f.read(BLOCK_SIZE)) + else: + # file too small, start from begining + f.seek(0, 0) + # only read what was not read + blocks.append(f.read(block_end_byte)) + lines_found = blocks[-1].count('\n') + lines_to_go -= lines_found + block_end_byte -= BLOCK_SIZE + block_number -= 1 + all_read_text = ''.join(reversed(blocks)) + return '\n'.join(all_read_text.splitlines()[-total_lines_wanted:]) + + +def lastCommit(): + """ + Returns last commit information as dict with 'commit' and 'time' keys + """ + githeadfile = os.path.join(codePath(), '..', '.git', 'logs', 'HEAD') + result = {} + if os.path.isfile(githeadfile): + try: + with open(githeadfile, 'rt') as githead: + line = tail(githead, 1) + result['commit'] = line.split()[1] + result['time'] = datetime.fromtimestamp( + float(re.search(r'>\s*(.*?)\s', line).group(1)) + ) + except (IOError, AttributeError, TypeError): + pass + return result diff --git a/src/tests/mock/pybitmessage/proofofwork.py b/src/tests/mock/pybitmessage/proofofwork.py new file mode 100644 index 00000000..148d6734 --- /dev/null +++ b/src/tests/mock/pybitmessage/proofofwork.py @@ -0,0 +1,394 @@ +# pylint: 
disable=too-many-branches,too-many-statements,protected-access +""" +Proof of work calculation +""" + +import ctypes +import hashlib +import os +import sys +import tempfile +import time +from struct import pack, unpack +from subprocess import call + +import openclpow +import paths +import queues +import state +import tr +from bmconfigparser import BMConfigParser +from debug import logger + +bitmsglib = 'bitmsghash.so' +bmpow = None + + +class LogOutput(object): # pylint: disable=too-few-public-methods + """ + A context manager that block stdout for its scope + and appends it's content to log before exit. Usage:: + + with LogOutput(): + os.system('ls -l') + + https://stackoverflow.com/questions/5081657 + """ + + def __init__(self, prefix='PoW'): + self.prefix = prefix + try: + sys.stdout.flush() + self._stdout = sys.stdout + self._stdout_fno = os.dup(sys.stdout.fileno()) + except AttributeError: + # NullWriter instance has no attribute 'fileno' on Windows + self._stdout = None + else: + self._dst, self._filepath = tempfile.mkstemp() + + def __enter__(self): + if not self._stdout: + return + stdout = os.dup(1) + os.dup2(self._dst, 1) + os.close(self._dst) + sys.stdout = os.fdopen(stdout, 'w') + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self._stdout: + return + sys.stdout.close() + sys.stdout = self._stdout + sys.stdout.flush() + os.dup2(self._stdout_fno, 1) + + with open(self._filepath) as out: + for line in out: + logger.info('%s: %s', self.prefix, line) + os.remove(self._filepath) + + +def _set_idle(): + if 'linux' in sys.platform: + os.nice(20) + else: + try: + # pylint: disable=no-member,import-error + sys.getwindowsversion() + import win32api + import win32process + import win32con + pid = win32api.GetCurrentProcessId() + handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid) + win32process.SetPriorityClass(handle, win32process.IDLE_PRIORITY_CLASS) + except: # noqa:E722 + # Windows 64-bit + pass + + +def _pool_worker(nonce, 
initialHash, target, pool_size): + _set_idle() + trialValue = float('inf') + while trialValue > target: + nonce += pool_size + trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512( + pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) + return [trialValue, nonce] + + +def _doSafePoW(target, initialHash): + logger.debug("Safe PoW start") + nonce = 0 + trialValue = float('inf') + while trialValue > target and state.shutdown == 0: + nonce += 1 + trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512( + pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) + if state.shutdown != 0: + raise StopIteration("Interrupted") # pylint: misplaced-bare-raise + logger.debug("Safe PoW done") + return [trialValue, nonce] + + +def _doFastPoW(target, initialHash): + logger.debug("Fast PoW start") + from multiprocessing import Pool, cpu_count + try: + pool_size = cpu_count() + except: # noqa:E722 + pool_size = 4 + try: + maxCores = BMConfigParser().getint('bitmessagesettings', 'maxcores') + except: # noqa:E722 + maxCores = 99999 + if pool_size > maxCores: + pool_size = maxCores + + pool = Pool(processes=pool_size) + result = [] + for i in range(pool_size): + result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size))) + + while True: + if state.shutdown > 0: + try: + pool.terminate() + pool.join() + except: # noqa:E722 + pass + raise StopIteration("Interrupted") + for i in range(pool_size): + if result[i].ready(): + try: + result[i].successful() + except AssertionError: + pool.terminate() + pool.join() + raise StopIteration("Interrupted") + result = result[i].get() + pool.terminate() + pool.join() + logger.debug("Fast PoW done") + return result[0], result[1] + time.sleep(0.2) + + +def _doCPoW(target, initialHash): + with LogOutput(): + h = initialHash + m = target + out_h = ctypes.pointer(ctypes.create_string_buffer(h, 64)) + out_m = ctypes.c_ulonglong(m) + logger.debug("C PoW start") + nonce = bmpow(out_h, out_m) + + trialValue, = 
unpack('>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) + if state.shutdown != 0: + raise StopIteration("Interrupted") + logger.debug("C PoW done") + return [trialValue, nonce] + + +def _doGPUPoW(target, initialHash): + logger.debug("GPU PoW start") + nonce = openclpow.do_opencl_pow(initialHash.encode("hex"), target) + trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) + if trialValue > target: + deviceNames = ", ".join(gpu.name for gpu in openclpow.enabledGpus) + queues.UISignalQueue.put(( + 'updateStatusBar', ( + tr._translate( + "MainWindow", + 'Your GPU(s) did not calculate correctly, disabling OpenCL. Please report to the developers.' + ), + 1))) + logger.error( + "Your GPUs (%s) did not calculate correctly, disabling OpenCL. Please report to the developers.", + deviceNames) + openclpow.enabledGpus = [] + raise Exception("GPU did not calculate correctly.") + if state.shutdown != 0: + raise StopIteration("Interrupted") + logger.debug("GPU PoW done") + return [trialValue, nonce] + + +def estimate(difficulty, format=False): # pylint: disable=redefined-builtin + """ + .. 
todo: fix unused variable + """ + ret = difficulty / 10 + if ret < 1: + ret = 1 + + if format: + # pylint: disable=unused-variable + out = str(int(ret)) + " seconds" + if ret > 60: + ret /= 60 + out = str(int(ret)) + " minutes" + if ret > 60: + ret /= 60 + out = str(int(ret)) + " hours" + if ret > 24: + ret /= 24 + out = str(int(ret)) + " days" + if ret > 7: + out = str(int(ret)) + " weeks" + if ret > 31: + out = str(int(ret)) + " months" + if ret > 366: + ret /= 366 + out = str(int(ret)) + " years" + ret = None # Ensure legacy behaviour + + return ret + + +def getPowType(): + """Get the proof of work implementation""" + + if openclpow.openclEnabled(): + return "OpenCL" + if bmpow: + return "C" + return "python" + + +def notifyBuild(tried=False): + """Notify the user of the success or otherwise of building the PoW C module""" + + if bmpow: + queues.UISignalQueue.put(('updateStatusBar', (tr._translate( + "proofofwork", "C PoW module built successfully."), 1))) + elif tried: + queues.UISignalQueue.put( + ( + 'updateStatusBar', ( + tr._translate( + "proofofwork", + "Failed to build C PoW module. Please build it manually." + ), + 1 + ) + ) + ) + else: + queues.UISignalQueue.put(('updateStatusBar', (tr._translate( + "proofofwork", "C PoW module unavailable. 
Please build it."), 1))) + + +def buildCPoW(): + """Attempt to build the PoW C module""" + if bmpow is not None: + return + if paths.frozen is not None: + notifyBuild(False) + return + if sys.platform in ["win32", "win64"]: + notifyBuild(False) + return + try: + if "bsd" in sys.platform: + # BSD make + call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash"), '-f', 'Makefile.bsd']) + else: + # GNU make + call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash")]) + if os.path.exists(os.path.join(paths.codePath(), "bitmsghash", "bitmsghash.so")): + init() + notifyBuild(True) + else: + notifyBuild(True) + except: # noqa:E722 + notifyBuild(True) + + +def run(target, initialHash): + """Run the proof of work thread""" + + if state.shutdown != 0: + raise # pylint: disable=misplaced-bare-raise + target = int(target) + if openclpow.openclEnabled(): + try: + return _doGPUPoW(target, initialHash) + except StopIteration: + raise + except: # noqa:E722 + pass # fallback + if bmpow: + try: + return _doCPoW(target, initialHash) + except StopIteration: + raise + except: # noqa:E722 + pass # fallback + if paths.frozen == "macosx_app" or not paths.frozen: + # on my (Peter Surda) Windows 10, Windows Defender + # does not like this and fights with PyBitmessage + # over CPU, resulting in very slow PoW + # added on 2015-11-29: multiprocesing.freeze_support() doesn't help + try: + return _doFastPoW(target, initialHash) + except StopIteration: + logger.error("Fast PoW got StopIteration") + raise + except: # noqa:E722 + logger.error("Fast PoW got exception:", exc_info=True) + try: + return _doSafePoW(target, initialHash) + except StopIteration: + raise + except: # noqa:E722 + pass # fallback + + +def resetPoW(): + """Initialise the OpenCL PoW""" + openclpow.initCL() + + +# init + + +def init(): + """Initialise PoW""" + # pylint: disable=global-statement + global bitmsglib, bmpow + + openclpow.initCL() + if sys.platform == "win32": + if ctypes.sizeof(ctypes.c_voidp) == 4: + 
bitmsglib = 'bitmsghash32.dll' + else: + bitmsglib = 'bitmsghash64.dll' + try: + # MSVS + bso = ctypes.WinDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) + logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib) + bmpow = bso.BitmessagePOW + bmpow.restype = ctypes.c_ulonglong + _doCPoW(2**63, "") + logger.info("Successfully tested C PoW DLL (stdcall) %s", bitmsglib) + except ValueError: + try: + # MinGW + bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) + logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib) + bmpow = bso.BitmessagePOW + bmpow.restype = ctypes.c_ulonglong + _doCPoW(2**63, "") + logger.info("Successfully tested C PoW DLL (cdecl) %s", bitmsglib) + except Exception as e: + logger.error("Error: %s", e, exc_info=True) + bso = None + except Exception as e: + logger.error("Error: %s", e, exc_info=True) + bso = None + else: + try: + bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) + except OSError: + import glob + try: + bso = ctypes.CDLL(glob.glob(os.path.join( + paths.codePath(), "bitmsghash", "bitmsghash*.so" + ))[0]) + except (OSError, IndexError): + bso = None + except: # noqa:E722 + bso = None + else: + logger.info("Loaded C PoW DLL %s", bitmsglib) + if bso: + try: + bmpow = bso.BitmessagePOW + bmpow.restype = ctypes.c_ulonglong + except: # noqa:E722 + bmpow = None + else: + bmpow = None + if bmpow is None: + buildCPoW() diff --git a/src/tests/mock/pybitmessage/protocol.py b/src/tests/mock/pybitmessage/protocol.py new file mode 100644 index 00000000..1934d9cc --- /dev/null +++ b/src/tests/mock/pybitmessage/protocol.py @@ -0,0 +1,524 @@ +""" +Low-level protocol-related functions. 
+""" +# pylint: disable=too-many-boolean-expressions,too-many-return-statements +# pylint: disable=too-many-locals,too-many-statements + +import base64 +import hashlib +import random +import socket +import sys +import time +from binascii import hexlify +from struct import Struct, pack, unpack + +import defaults +import highlevelcrypto +import state +from addresses import ( + encodeVarint, decodeVarint, decodeAddress, varintDecodeError) +from bmconfigparser import BMConfigParser +from debug import logger +from fallback import RIPEMD160Hash +from helper_sql import sqlExecute +from version import softwareVersion + +# Service flags +#: This is a normal network node +NODE_NETWORK = 1 +#: This node supports SSL/TLS in the current connect (python < 2.7.9 +#: only supports an SSL client, so in that case it would only have this +#: on when the connection is a client). +NODE_SSL = 2 +# (Proposal) This node may do PoW on behalf of some its peers +# (PoW offloading/delegating), but it doesn't have to. Clients may have +# to meet additional requirements (e.g. TLS authentication) +# NODE_POW = 4 +#: Node supports dandelion +NODE_DANDELION = 8 + +# Bitfield flags +BITFIELD_DOESACK = 1 + +# Error types +STATUS_WARNING = 0 +STATUS_ERROR = 1 +STATUS_FATAL = 2 + +# Object types +OBJECT_GETPUBKEY = 0 +OBJECT_PUBKEY = 1 +OBJECT_MSG = 2 +OBJECT_BROADCAST = 3 +OBJECT_ONIONPEER = 0x746f72 +OBJECT_I2P = 0x493250 +OBJECT_ADDR = 0x61646472 + +eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack( + '>Q', random.randrange(1, 18446744073709551615)) + +# Compiled struct for packing/unpacking headers +# New code should use CreatePacket instead of Header.pack +Header = Struct('!L12sL4s') + +VersionPacket = Struct('>LqQ20s4s36sH') + +# Bitfield + + +def getBitfield(address): + """Get a bitfield from an address""" + # bitfield of features supported by me (see the wiki). 
+ bitfield = 0 + # send ack + if not BMConfigParser().safeGetBoolean(address, 'dontsendack'): + bitfield |= BITFIELD_DOESACK + return pack('>I', bitfield) + + +def checkBitfield(bitfieldBinary, flags): + """Check if a bitfield matches the given flags""" + bitfield, = unpack('>I', bitfieldBinary) + return (bitfield & flags) == flags + + +def isBitSetWithinBitfield(fourByteString, n): + """Check if a particular bit is set in a bitfeld""" + # Uses MSB 0 bit numbering across 4 bytes of data + n = 31 - n + x, = unpack('>L', fourByteString) + return x & 2**n != 0 + + +# IP addresses + + +def encodeHost(host): + """Encode a given host to be used in low-level socket operations""" + if host.find('.onion') > -1: + return b'\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode( + host.split(".")[0], True) + elif host.find(':') == -1: + return b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + \ + socket.inet_aton(host) + return socket.inet_pton(socket.AF_INET6, host) + + +def networkType(host): + """Determine if a host is IPv4, IPv6 or an onion address""" + if host.find('.onion') > -1: + return 'onion' + elif host.find(':') == -1: + return 'IPv4' + return 'IPv6' + + +def network_group(host): + """Canonical identifier of network group + simplified, borrowed from + GetGroup() in src/netaddresses.cpp in bitcoin core""" + if not isinstance(host, str): + return None + network_type = networkType(host) + try: + raw_host = encodeHost(host) + except socket.error: + return host + if network_type == 'IPv4': + decoded_host = checkIPv4Address(raw_host[12:], True) + if decoded_host: + # /16 subnet + return raw_host[12:14] + elif network_type == 'IPv6': + decoded_host = checkIPv6Address(raw_host, True) + if decoded_host: + # /32 subnet + return raw_host[0:12] + else: + # just host, e.g. 
for tor + return host + # global network type group for local, private, unroutable + return network_type + + +def checkIPAddress(host, private=False): + """ + Returns hostStandardFormat if it is a valid IP address, + otherwise returns False + """ + if host[0:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF': + hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:]) + return checkIPv4Address(host[12:], hostStandardFormat, private) + elif host[0:6] == b'\xfd\x87\xd8\x7e\xeb\x43': + # Onion, based on BMD/bitcoind + hostStandardFormat = base64.b32encode(host[6:]).lower() + ".onion" + if private: + return False + return hostStandardFormat + else: + try: + hostStandardFormat = socket.inet_ntop(socket.AF_INET6, host) + except ValueError: + return False + if len(hostStandardFormat) == 0: + # This can happen on Windows systems which are + # not 64-bit compatible so let us drop the IPv6 address. + return False + return checkIPv6Address(host, hostStandardFormat, private) + + +def checkIPv4Address(host, hostStandardFormat, private=False): + """ + Returns hostStandardFormat if it is an IPv4 address, + otherwise returns False + """ + if host[0:1] == b'\x7F': # 127/8 + if not private: + logger.debug( + 'Ignoring IP address in loopback range: %s', + hostStandardFormat) + return hostStandardFormat if private else False + if host[0:1] == b'\x0A': # 10/8 + if not private: + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) + return hostStandardFormat if private else False + if host[0:2] == b'\xC0\xA8': # 192.168/16 + if not private: + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) + return hostStandardFormat if private else False + if host[0:2] >= b'\xAC\x10' and host[0:2] < b'\xAC\x20': # 172.16/12 + if not private: + logger.debug( + 'Ignoring IP address in private range: %s', hostStandardFormat) + return hostStandardFormat if private else False + return False if private else hostStandardFormat + + +def 
checkIPv6Address(host, hostStandardFormat, private=False): + """ + Returns hostStandardFormat if it is an IPv6 address, + otherwise returns False + """ + if host == b'\x00' * 15 + b'\x01': + if not private: + logger.debug('Ignoring loopback address: %s', hostStandardFormat) + return False + try: + host = [ord(c) for c in host[:2]] + except TypeError: # python3 has ints already + pass + if host[0] == 0xfe and host[1] & 0xc0 == 0x80: + if not private: + logger.debug('Ignoring local address: %s', hostStandardFormat) + return hostStandardFormat if private else False + if host[0] & 0xfe == 0xfc: + if not private: + logger.debug( + 'Ignoring unique local address: %s', hostStandardFormat) + return hostStandardFormat if private else False + return False if private else hostStandardFormat + + +def haveSSL(server=False): + """ + Predicate to check if ECDSA server support is required and available + + python < 2.7.9's ssl library does not support ECDSA server due to + missing initialisation of available curves, but client works ok + """ + if not server: + return True + elif sys.version_info >= (2, 7, 9): + return True + return False + + +def checkSocksIP(host): + """Predicate to check if we're using a SOCKS proxy""" + sockshostname = BMConfigParser().safeGet( + 'bitmessagesettings', 'sockshostname') + try: + if not state.socksIP: + state.socksIP = socket.gethostbyname(sockshostname) + except NameError: # uninitialised + state.socksIP = socket.gethostbyname(sockshostname) + except (TypeError, socket.gaierror): # None, resolving failure + state.socksIP = sockshostname + return state.socksIP == host + + +def isProofOfWorkSufficient( + data, nonceTrialsPerByte=0, payloadLengthExtraBytes=0, recvTime=0): + """ + Validate an object's Proof of Work using method described + `here `_ + + Arguments: + int nonceTrialsPerByte (default: from `.defaults`) + int payloadLengthExtraBytes (default: from `.defaults`) + float recvTime (optional) UNIX epoch time when object was + received from the 
network (default: current system time) + Returns: + True if PoW valid and sufficient, False in all other cases + """ + if nonceTrialsPerByte < defaults.networkDefaultProofOfWorkNonceTrialsPerByte: + nonceTrialsPerByte = defaults.networkDefaultProofOfWorkNonceTrialsPerByte + if payloadLengthExtraBytes < defaults.networkDefaultPayloadLengthExtraBytes: + payloadLengthExtraBytes = defaults.networkDefaultPayloadLengthExtraBytes + endOfLifeTime, = unpack('>Q', data[8:16]) + TTL = endOfLifeTime - (int(recvTime) if recvTime else int(time.time())) + if TTL < 300: + TTL = 300 + POW, = unpack('>Q', hashlib.sha512(hashlib.sha512( + data[:8] + hashlib.sha512(data[8:]).digest() + ).digest()).digest()[0:8]) + return POW <= 2 ** 64 / ( + nonceTrialsPerByte * ( + len(data) + payloadLengthExtraBytes + + ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16)))) + + +# Packet creation + + +def CreatePacket(command, payload=b''): + """Construct and return a packet""" + payload_length = len(payload) + checksum = hashlib.sha512(payload).digest()[0:4] + + b = bytearray(Header.size + payload_length) + Header.pack_into(b, 0, 0xE9BEB4D9, command, payload_length, checksum) + b[Header.size:] = payload + return bytes(b) + + +def assembleVersionMessage( + remoteHost, remotePort, participatingStreams, server=False, nodeid=None +): + """ + Construct the payload of a version message, + return the resulting bytes of running `CreatePacket` on it + """ + payload = b'' + payload += pack('>L', 3) # protocol version. + # bitflags of the services I offer. + payload += pack( + '>q', + NODE_NETWORK + | (NODE_SSL if haveSSL(server) else 0) + | (NODE_DANDELION if state.dandelion else 0) + ) + payload += pack('>q', int(time.time())) + + # boolservices of remote connection; ignored by the remote host. 
+ payload += pack('>q', 1) + if checkSocksIP(remoteHost) and server: + # prevent leaking of tor outbound IP + payload += encodeHost('127.0.0.1') + payload += pack('>H', 8444) + else: + # use first 16 bytes if host data is longer + # for example in case of onion v3 service + try: + payload += encodeHost(remoteHost)[:16] + except socket.error: + payload += encodeHost('127.0.0.1') + payload += pack('>H', remotePort) # remote IPv6 and port + + # bitflags of the services I offer. + payload += pack( + '>q', + NODE_NETWORK + | (NODE_SSL if haveSSL(server) else 0) + | (NODE_DANDELION if state.dandelion else 0) + ) + # = 127.0.0.1. This will be ignored by the remote host. + # The actual remote connected IP will be used. + payload += b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack( + '>L', 2130706433) + # we have a separate extPort and incoming over clearnet + # or outgoing through clearnet + extport = BMConfigParser().safeGetInt('bitmessagesettings', 'extport') + if ( + extport and ((server and not checkSocksIP(remoteHost)) or ( + BMConfigParser().get('bitmessagesettings', 'socksproxytype') + == 'none' and not server)) + ): + payload += pack('>H', extport) + elif checkSocksIP(remoteHost) and server: # incoming connection over Tor + payload += pack( + '>H', BMConfigParser().getint('bitmessagesettings', 'onionport')) + else: # no extport and not incoming over Tor + payload += pack( + '>H', BMConfigParser().getint('bitmessagesettings', 'port')) + + if nodeid is not None: + payload += nodeid[0:8] + else: + payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf + userAgent = ('/PyBitmessage:%s/' % softwareVersion).encode('utf-8') + payload += encodeVarint(len(userAgent)) + payload += userAgent + + # Streams + payload += encodeVarint(len(participatingStreams)) + count = 0 + for stream in sorted(participatingStreams): + payload += encodeVarint(stream) + count += 1 + # protocol limit, see specification + if count >= 160000: + break + + return 
CreatePacket(b'version', payload) + + +def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''): + """ + Construct the payload of an error message, + return the resulting bytes of running `CreatePacket` on it + """ + payload = encodeVarint(fatal) + payload += encodeVarint(banTime) + payload += encodeVarint(len(inventoryVector)) + payload += inventoryVector + payload += encodeVarint(len(errorText)) + payload += errorText + return CreatePacket(b'error', payload) + + +# Packet decoding + + +def decryptAndCheckPubkeyPayload(data, address): + """ + Version 4 pubkeys are encrypted. This function is run when we + already have the address to which we want to try to send a message. + The 'data' may come either off of the wire or we might have had it + already in our inventory when we tried to send a msg to this + particular address. + """ + try: + addressVersion, streamNumber, ripe = decodeAddress(address)[1:] + + readPosition = 20 # bypass the nonce, time, and object type + embeddedAddressVersion, varintLength = decodeVarint( + data[readPosition:readPosition + 10]) + readPosition += varintLength + embeddedStreamNumber, varintLength = decodeVarint( + data[readPosition:readPosition + 10]) + readPosition += varintLength + # We'll store the address version and stream number + # (and some more) in the pubkeys table. + storedData = data[20:readPosition] + + if addressVersion != embeddedAddressVersion: + logger.info( + 'Pubkey decryption was UNsuccessful' + ' due to address version mismatch.') + return 'failed' + if streamNumber != embeddedStreamNumber: + logger.info( + 'Pubkey decryption was UNsuccessful' + ' due to stream number mismatch.') + return 'failed' + + tag = data[readPosition:readPosition + 32] + readPosition += 32 + # the time through the tag. More data is appended onto + # signedData below after the decryption. 
+ signedData = data[8:readPosition] + encryptedData = data[readPosition:] + + # Let us try to decrypt the pubkey + toAddress, cryptorObject = state.neededPubkeys[tag] + if toAddress != address: + logger.critical( + 'decryptAndCheckPubkeyPayload failed due to toAddress' + ' mismatch. This is very peculiar.' + ' toAddress: %s, address %s', + toAddress, address + ) + # the only way I can think that this could happen + # is if someone encodes their address data two different ways. + # That sort of address-malleability should have been caught + # by the UI or API and an error given to the user. + return 'failed' + try: + decryptedData = cryptorObject.decrypt(encryptedData) + except: # noqa:E722 + # FIXME: use a proper exception after `pyelliptic.ecc` is refactored. + # Someone must have encrypted some data with a different key + # but tagged it with a tag for which we are watching. + logger.info('Pubkey decryption was unsuccessful.') + return 'failed' + + readPosition = 0 + # bitfieldBehaviors = decryptedData[readPosition:readPosition + 4] + readPosition += 4 + publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64] + readPosition += 64 + publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64] + readPosition += 64 + specifiedNonceTrialsPerByteLength = decodeVarint( + decryptedData[readPosition:readPosition + 10])[1] + readPosition += specifiedNonceTrialsPerByteLength + specifiedPayloadLengthExtraBytesLength = decodeVarint( + decryptedData[readPosition:readPosition + 10])[1] + readPosition += specifiedPayloadLengthExtraBytesLength + storedData += decryptedData[:readPosition] + signedData += decryptedData[:readPosition] + signatureLength, signatureLengthLength = decodeVarint( + decryptedData[readPosition:readPosition + 10]) + readPosition += signatureLengthLength + signature = decryptedData[readPosition:readPosition + signatureLength] + + if not highlevelcrypto.verify( + signedData, signature, hexlify(publicSigningKey)): + 
logger.info( + 'ECDSA verify failed (within decryptAndCheckPubkeyPayload)') + return 'failed' + + logger.info( + 'ECDSA verify passed (within decryptAndCheckPubkeyPayload)') + + sha = hashlib.new('sha512') + sha.update(publicSigningKey + publicEncryptionKey) + embeddedRipe = RIPEMD160Hash(sha.digest()).digest() + + if embeddedRipe != ripe: + # Although this pubkey object had the tag were were looking for + # and was encrypted with the correct encryption key, + # it doesn't contain the correct pubkeys. Someone is + # either being malicious or using buggy software. + logger.info( + 'Pubkey decryption was UNsuccessful due to RIPE mismatch.') + return 'failed' + + # Everything checked out. Insert it into the pubkeys table. + + logger.info( + 'within decryptAndCheckPubkeyPayload, ' + 'addressVersion: %s, streamNumber: %s\nripe %s\n' + 'publicSigningKey in hex: %s\npublicEncryptionKey in hex: %s', + addressVersion, streamNumber, hexlify(ripe), + hexlify(publicSigningKey), hexlify(publicEncryptionKey) + ) + + t = (address, addressVersion, storedData, int(time.time()), 'yes') + sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t) + return 'successful' + except varintDecodeError: + logger.info( + 'Pubkey decryption was UNsuccessful due to a malformed varint.') + return 'failed' + except Exception: + logger.critical( + 'Pubkey decryption was UNsuccessful because of' + ' an unhandled exception! 
This is definitely a bug!', + exc_info=True + ) + return 'failed' diff --git a/src/tests/mock/pybitmessage/pybitmessage b/src/tests/mock/pybitmessage/pybitmessage new file mode 100644 index 00000000..decebfff --- /dev/null +++ b/src/tests/mock/pybitmessage/pybitmessage @@ -0,0 +1,11 @@ +#!/usr/bin/python2.7 + +import os +import pkg_resources + +dist = pkg_resources.get_distribution('pybitmessage') +script_file = os.path.join(dist.location, dist.key, 'bitmessagemain.py') +new_globals = globals() +new_globals.update(__file__=script_file) + +execfile(script_file, new_globals) diff --git a/src/tests/mock/pybitmessage/pyelliptic/__init__.py b/src/tests/mock/pybitmessage/pyelliptic/__init__.py new file mode 100644 index 00000000..cafa89c9 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/__init__.py @@ -0,0 +1,30 @@ +""" +Copyright (C) 2010 +Author: Yann GUIBET +Contact: + +Python OpenSSL wrapper. +For modern cryptography with ECC, AES, HMAC, Blowfish, ... + +This is an abandoned package maintained inside of the PyBitmessage. 
+""" + +from .cipher import Cipher +from .ecc import ECC +from .eccblind import ECCBlind +from .eccblindchain import ECCBlindChain +from .hash import hmac_sha256, hmac_sha512, pbkdf2 +from .openssl import OpenSSL + +__version__ = '1.3' + +__all__ = [ + 'OpenSSL', + 'ECC', + 'ECCBlind', + 'ECCBlindChain', + 'Cipher', + 'hmac_sha256', + 'hmac_sha512', + 'pbkdf2' +] diff --git a/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py b/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py new file mode 100644 index 00000000..23c24b5e --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py @@ -0,0 +1,166 @@ +""" +Arithmetic Expressions +""" +import hashlib +import re + +P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1 +A = 0 +Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 +Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 +G = (Gx, Gy) + + +def inv(a, n): + """Inversion""" + lm, hm = 1, 0 + low, high = a % n, n + while low > 1: + r = high // low + nm, new = hm - lm * r, high - low * r + lm, low, hm, high = nm, new, lm, low + return lm % n + + +def get_code_string(base): + """Returns string according to base value""" + if base == 2: + return b'01' + if base == 10: + return b'0123456789' + if base == 16: + return b'0123456789abcdef' + if base == 58: + return b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' + if base == 256: + try: + return b''.join([chr(x) for x in range(256)]) + except TypeError: + return bytes([x for x in range(256)]) + + raise ValueError("Invalid base!") + + +def encode(val, base, minlen=0): + """Returns the encoded string""" + code_string = get_code_string(base) + result = b'' + while val > 0: + val, i = divmod(val, base) + result = code_string[i:i + 1] + result + if len(result) < minlen: + result = code_string[0:1] * (minlen - len(result)) + result + return result + + +def decode(string, base): + """Returns the decoded string""" + code_string = 
get_code_string(base) + result = 0 + if base == 16: + string = string.lower() + while string: + result *= base + result += code_string.find(string[0]) + string = string[1:] + return result + + +def changebase(string, frm, to, minlen=0): + """Change base of the string""" + return encode(decode(string, frm), to, minlen) + + +def base10_add(a, b): + """Adding the numbers that are of base10""" + # pylint: disable=too-many-function-args + if a is None: + return b[0], b[1] + if b is None: + return a[0], a[1] + if a[0] == b[0]: + if a[1] == b[1]: + return base10_double(a[0], a[1]) + return None + m = ((b[1] - a[1]) * inv(b[0] - a[0], P)) % P + x = (m * m - a[0] - b[0]) % P + y = (m * (a[0] - x) - a[1]) % P + return (x, y) + + +def base10_double(a): + """Double the numbers that are of base10""" + if a is None: + return None + m = ((3 * a[0] * a[0] + A) * inv(2 * a[1], P)) % P + x = (m * m - 2 * a[0]) % P + y = (m * (a[0] - x) - a[1]) % P + return (x, y) + + +def base10_multiply(a, n): + """Multiply the numbers that are of base10""" + if n == 0: + return G + if n == 1: + return a + n, m = divmod(n, 2) + if m == 0: + return base10_double(base10_multiply(a, n)) + if m == 1: + return base10_add(base10_double(base10_multiply(a, n)), a) + return None + + +def hex_to_point(h): + """Converting hexadecimal to point value""" + return (decode(h[2:66], 16), decode(h[66:], 16)) + + +def point_to_hex(p): + """Converting point value to hexadecimal""" + return b'04' + encode(p[0], 16, 64) + encode(p[1], 16, 64) + + +def multiply(privkey, pubkey): + """Multiplying keys""" + return point_to_hex(base10_multiply( + hex_to_point(pubkey), decode(privkey, 16))) + + +def privtopub(privkey): + """Converting key from private to public""" + return point_to_hex(base10_multiply(G, decode(privkey, 16))) + + +def add(p1, p2): + """Adding two public keys""" + if len(p1) == 32: + return encode(decode(p1, 16) + decode(p2, 16) % P, 16, 32) + return point_to_hex(base10_add(hex_to_point(p1), 
hex_to_point(p2))) + + +def hash_160(string): + """Hashed version of public key""" + intermed = hashlib.sha256(string).digest() + ripemd160 = hashlib.new('ripemd160') + ripemd160.update(intermed) + return ripemd160.digest() + + +def dbl_sha256(string): + """Double hashing (SHA256)""" + return hashlib.sha256(hashlib.sha256(string).digest()).digest() + + +def bin_to_b58check(inp): + """Convert binary to base58""" + inp_fmtd = '\x00' + inp + leadingzbytes = len(re.match('^\x00*', inp_fmtd).group(0)) + checksum = dbl_sha256(inp_fmtd)[:4] + return '1' * leadingzbytes + changebase(inp_fmtd + checksum, 256, 58) + + +def pubkey_to_address(pubkey): + """Convert a public key (in hex) to a Bitcoin address""" + return bin_to_b58check(hash_160(changebase(pubkey, 16, 256))) diff --git a/src/tests/mock/pybitmessage/pyelliptic/cipher.py b/src/tests/mock/pybitmessage/pyelliptic/cipher.py new file mode 100644 index 00000000..af6c08ca --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/cipher.py @@ -0,0 +1,90 @@ +""" +Symmetric Encryption +""" +# Copyright (C) 2011 Yann GUIBET +# See LICENSE for details. 
+ +from .openssl import OpenSSL + + +# pylint: disable=redefined-builtin +class Cipher(object): + """ + Main class for encryption + + import pyelliptic + iv = pyelliptic.Cipher.gen_IV('aes-256-cfb') + ctx = pyelliptic.Cipher("secretkey", iv, 1, ciphername='aes-256-cfb') + ciphertext = ctx.update('test1') + ciphertext += ctx.update('test2') + ciphertext += ctx.final() + + ctx2 = pyelliptic.Cipher("secretkey", iv, 0, ciphername='aes-256-cfb') + print ctx2.ciphering(ciphertext) + """ + def __init__(self, key, iv, do, ciphername='aes-256-cbc'): + """ + do == 1 => Encrypt; do == 0 => Decrypt + """ + self.cipher = OpenSSL.get_cipher(ciphername) + self.ctx = OpenSSL.EVP_CIPHER_CTX_new() + if do == 1 or do == 0: + k = OpenSSL.malloc(key, len(key)) + IV = OpenSSL.malloc(iv, len(iv)) + OpenSSL.EVP_CipherInit_ex( + self.ctx, self.cipher.get_pointer(), 0, k, IV, do) + else: + raise Exception("RTFM ...") + + @staticmethod + def get_all_cipher(): + """ + static method, returns all ciphers available + """ + return OpenSSL.cipher_algo.keys() + + @staticmethod + def get_blocksize(ciphername): + """This Method returns cipher blocksize""" + cipher = OpenSSL.get_cipher(ciphername) + return cipher.get_blocksize() + + @staticmethod + def gen_IV(ciphername): + """Generate random initialization vector""" + cipher = OpenSSL.get_cipher(ciphername) + return OpenSSL.rand(cipher.get_blocksize()) + + def update(self, input): + """Update result with more data""" + i = OpenSSL.c_int(0) + buffer = OpenSSL.malloc(b"", len(input) + self.cipher.get_blocksize()) + inp = OpenSSL.malloc(input, len(input)) + if OpenSSL.EVP_CipherUpdate(self.ctx, OpenSSL.byref(buffer), + OpenSSL.byref(i), inp, len(input)) == 0: + raise Exception("[OpenSSL] EVP_CipherUpdate FAIL ...") + return buffer.raw[0:i.value] # pylint: disable=invalid-slice-index + + def final(self): + """Returning the final value""" + i = OpenSSL.c_int(0) + buffer = OpenSSL.malloc(b"", self.cipher.get_blocksize()) + if 
(OpenSSL.EVP_CipherFinal_ex(self.ctx, OpenSSL.byref(buffer),
                                    OpenSSL.byref(i))) == 0:
            raise Exception("[OpenSSL] EVP_CipherFinal_ex FAIL ...")
        # Only the first i.value bytes of the output buffer are valid.
        return buffer.raw[0:i.value]  # pylint: disable=invalid-slice-index

    def ciphering(self, input):
        """
        Do update and final in one method
        """
        buff = self.update(input)
        return buff + self.final()

    def __del__(self):
        # pylint: disable=protected-access
        # OpenSSL >= 1.1 (but not LibreSSL) replaced _cleanup with _reset.
        if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
            OpenSSL.EVP_CIPHER_CTX_reset(self.ctx)
        else:
            OpenSSL.EVP_CIPHER_CTX_cleanup(self.ctx)
        OpenSSL.EVP_CIPHER_CTX_free(self.ctx)
diff --git a/src/tests/mock/pybitmessage/pyelliptic/ecc.py b/src/tests/mock/pybitmessage/pyelliptic/ecc.py
new file mode 100644
index 00000000..388227c7
--- /dev/null
+++ b/src/tests/mock/pybitmessage/pyelliptic/ecc.py
@@ -0,0 +1,501 @@
"""
Asymmetric cryptography using elliptic curves
"""
# pylint: disable=protected-access, too-many-branches, too-many-locals
# Copyright (C) 2011 Yann GUIBET
# See LICENSE for details.

from hashlib import sha512
from struct import pack, unpack

from .cipher import Cipher
from .hash import equals, hmac_sha256
from .openssl import OpenSSL


class ECC(object):
    """
    Asymmetric encryption with Elliptic Curve Cryptography (ECC)
    ECDH, ECDSA and ECIES

    >>> import pyelliptic

    >>> alice = pyelliptic.ECC() # default curve: sect283r1
    >>> bob = pyelliptic.ECC(curve='sect571r1')

    >>> ciphertext = alice.encrypt("Hello Bob", bob.get_pubkey())
    >>> print bob.decrypt(ciphertext)

    >>> signature = bob.sign("Hello Alice")
    >>> # alice's job :
    >>> print pyelliptic.ECC(
    >>>     pubkey=bob.get_pubkey()).verify(signature, "Hello Alice")

    >>> # ERROR !!!
    >>> try:
    >>>     key = alice.get_ecdh_key(bob.get_pubkey())
    >>> except:
    >>>     print("For ECDH key agreement, the keys must be defined on the same curve !")

    >>> alice = pyelliptic.ECC(curve='sect571r1')
    >>> print alice.get_ecdh_key(bob.get_pubkey()).encode('hex')
    >>> print bob.get_ecdh_key(alice.get_pubkey()).encode('hex')

    """

    def __init__(
            self,
            pubkey=None,
            privkey=None,
            pubkey_x=None,
            pubkey_y=None,
            raw_privkey=None,
            curve='sect283r1',
    ):  # pylint: disable=too-many-arguments
        """
        For a normal and High level use, specifie pubkey,
        privkey (if you need) and the curve
        """
        # The curve may be given either by name (str) or by its OpenSSL
        # numeric id; internally self.curve is always the numeric id.
        if isinstance(curve, str):
            self.curve = OpenSSL.get_curve(curve)
        else:
            self.curve = curve

        if pubkey_x is not None and pubkey_y is not None:
            # Raw affine coordinates supplied directly.
            self._set_keys(pubkey_x, pubkey_y, raw_privkey)
        elif pubkey is not None:
            # Serialized pubkey (and optionally privkey); both must be
            # on the same curve.
            curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey)
            if privkey is not None:
                curve2, raw_privkey, _ = ECC._decode_privkey(privkey)
                if curve != curve2:
                    raise Exception("Bad ECC keys ...")
            self.curve = curve
            self._set_keys(pubkey_x, pubkey_y, raw_privkey)
        else:
            # No key material given: generate a fresh keypair.
            self.privkey, self.pubkey_x, self.pubkey_y = self._generate()

    def _set_keys(self, pubkey_x, pubkey_y, privkey):
        # Validate before storing; on failure clear everything so the
        # object is not left half-initialised.
        if self.raw_check_key(privkey, pubkey_x, pubkey_y) < 0:
            self.pubkey_x = None
            self.pubkey_y = None
            self.privkey = None
            raise Exception("Bad ECC keys ...")
        else:
            self.pubkey_x = pubkey_x
            self.pubkey_y = pubkey_y
            self.privkey = privkey

    @staticmethod
    def get_curves():
        """
        static method, returns the list of all the curves available
        """
        return OpenSSL.curves.keys()

    def get_curve(self):
        """Encryption object from curve name"""
        return OpenSSL.get_curve_by_id(self.curve)

    def get_curve_id(self):
        """Currently used curve"""
        return self.curve

    def get_pubkey(self):
        """
        High level function which returns :
        curve(2) + len_of_pubkeyX(2) + pubkeyX + len_of_pubkeyY + pubkeyY
        """
        return b''.join((
            pack('!H', self.curve),
            pack('!H', len(self.pubkey_x)),
            self.pubkey_x,
            pack('!H', len(self.pubkey_y)),
            self.pubkey_y,
        ))

    def get_privkey(self):
        """
        High level function which returns
        curve(2) + len_of_privkey(2) + privkey
        """
        return b''.join((
            pack('!H', self.curve),
            pack('!H', len(self.privkey)),
            self.privkey,
        ))

    @staticmethod
    def _decode_pubkey(pubkey):
        # Inverse of get_pubkey(); also returns the number of bytes
        # consumed so callers can parse trailing data (see decrypt()).
        i = 0
        curve = unpack('!H', pubkey[i:i + 2])[0]
        i += 2
        tmplen = unpack('!H', pubkey[i:i + 2])[0]
        i += 2
        pubkey_x = pubkey[i:i + tmplen]
        i += tmplen
        tmplen = unpack('!H', pubkey[i:i + 2])[0]
        i += 2
        pubkey_y = pubkey[i:i + tmplen]
        i += tmplen
        return curve, pubkey_x, pubkey_y, i

    @staticmethod
    def _decode_privkey(privkey):
        # Inverse of get_privkey().
        i = 0
        curve = unpack('!H', privkey[i:i + 2])[0]
        i += 2
        tmplen = unpack('!H', privkey[i:i + 2])[0]
        i += 2
        privkey = privkey[i:i + tmplen]
        i += tmplen
        return curve, privkey, i

    def _generate(self):
        # Generate a keypair with OpenSSL and export it as raw
        # big-endian byte strings.  All OpenSSL objects are freed in
        # the finally block.
        try:
            pub_key_x = OpenSSL.BN_new()
            pub_key_y = OpenSSL.BN_new()

            key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
            if key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
            if (OpenSSL.EC_KEY_generate_key(key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_generate_key FAIL ...")
            if (OpenSSL.EC_KEY_check_key(key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
            priv_key = OpenSSL.EC_KEY_get0_private_key(key)

            group = OpenSSL.EC_KEY_get0_group(key)
            pub_key = OpenSSL.EC_KEY_get0_public_key(key)

            if OpenSSL.EC_POINT_get_affine_coordinates_GFp(
                    group, pub_key, pub_key_x, pub_key_y, 0) == 0:
                raise Exception(
                    "[OpenSSL] EC_POINT_get_affine_coordinates_GFp FAIL ...")

            privkey = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(priv_key))
            pubkeyx = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_x))
            pubkeyy = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_y))
            OpenSSL.BN_bn2bin(priv_key, privkey)
            privkey = privkey.raw
            OpenSSL.BN_bn2bin(pub_key_x, pubkeyx)
            pubkeyx = pubkeyx.raw
            OpenSSL.BN_bn2bin(pub_key_y, pubkeyy)
            pubkeyy = pubkeyy.raw
            self.raw_check_key(privkey, pubkeyx, pubkeyy)

            return privkey, pubkeyx, pubkeyy

        finally:
            OpenSSL.EC_KEY_free(key)
            OpenSSL.BN_free(pub_key_x)
            OpenSSL.BN_free(pub_key_y)

    def get_ecdh_key(self, pubkey):
        """
        High level function. Compute public key with the local private key
        and returns a 512bits shared key
        """
        curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey)
        if curve != self.curve:
            raise Exception("ECC keys must be from the same curve !")
        return sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()

    def raw_get_ecdh_key(self, pubkey_x, pubkey_y):
        """ECDH key as binary data"""
        try:
            ecdh_keybuffer = OpenSSL.malloc(0, 32)

            other_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
            if other_key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")

            other_pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0)
            other_pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0)

            other_group = OpenSSL.EC_KEY_get0_group(other_key)
            other_pub_key = OpenSSL.EC_POINT_new(other_group)

            if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(other_group,
                                                            other_pub_key,
                                                            other_pub_key_x,
                                                            other_pub_key_y,
                                                            0)) == 0:
                raise Exception(
                    "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
            if (OpenSSL.EC_KEY_set_public_key(other_key, other_pub_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
            if (OpenSSL.EC_KEY_check_key(other_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")

            own_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
            if own_key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
            own_priv_key = OpenSSL.BN_bin2bn(
                self.privkey, len(self.privkey), 0)

            if (OpenSSL.EC_KEY_set_private_key(own_key, own_priv_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...")

            # Method table names differ between OpenSSL >= 1.1 and
            # older OpenSSL / LibreSSL.
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                OpenSSL.EC_KEY_set_method(own_key, OpenSSL.EC_KEY_OpenSSL())
            else:
                OpenSSL.ECDH_set_method(own_key, OpenSSL.ECDH_OpenSSL())
            ecdh_keylen = OpenSSL.ECDH_compute_key(
                ecdh_keybuffer, 32, other_pub_key, own_key, 0)

            if ecdh_keylen != 32:
                raise Exception("[OpenSSL] ECDH keylen FAIL ...")

            return ecdh_keybuffer.raw

        finally:
            OpenSSL.EC_KEY_free(other_key)
            OpenSSL.BN_free(other_pub_key_x)
            OpenSSL.BN_free(other_pub_key_y)
            OpenSSL.EC_POINT_free(other_pub_key)
            OpenSSL.EC_KEY_free(own_key)
            OpenSSL.BN_free(own_priv_key)

    def check_key(self, privkey, pubkey):
        """
        Check the public key and the private key.
        The private key is optional (replace by None)
        """
        curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey)
        if privkey is None:
            raw_privkey = None
            curve2 = curve
        else:
            curve2, raw_privkey, _ = ECC._decode_privkey(privkey)
        if curve != curve2:
            raise Exception("Bad public and private key")
        return self.raw_check_key(raw_privkey, pubkey_x, pubkey_y, curve)

    def raw_check_key(self, privkey, pubkey_x, pubkey_y, curve=None):
        """Check key validity, key is supplied as binary data"""
        if curve is None:
            curve = self.curve
        elif isinstance(curve, str):
            curve = OpenSSL.get_curve(curve)
        else:
            curve = curve
        try:
            key = OpenSSL.EC_KEY_new_by_curve_name(curve)
            if key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
            if privkey is not None:
                priv_key = OpenSSL.BN_bin2bn(privkey, len(privkey), 0)
            pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0)
            pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0)

            if privkey is not None:
                if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0:
                    raise Exception(
                        "[OpenSSL] EC_KEY_set_private_key FAIL ...")

            group = OpenSSL.EC_KEY_get0_group(key)
            pub_key = OpenSSL.EC_POINT_new(group)

            if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
                                                            pub_key_x,
                                                            pub_key_y,
                                                            0)) == 0:
                raise Exception(
                    "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
            if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
            if (OpenSSL.EC_KEY_check_key(key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
            return 0

        finally:
            OpenSSL.EC_KEY_free(key)
            OpenSSL.BN_free(pub_key_x)
            OpenSSL.BN_free(pub_key_y)
            OpenSSL.EC_POINT_free(pub_key)
            if privkey is not None:
                OpenSSL.BN_free(priv_key)

    def sign(self, inputb, digest_alg=OpenSSL.digest_ecdsa_sha1):
        """
        Sign the input with ECDSA method and returns the signature
        """
        try:
            size = len(inputb)
            buff = OpenSSL.malloc(inputb, size)
            digest = OpenSSL.malloc(0, 64)
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                md_ctx = OpenSSL.EVP_MD_CTX_new()
            else:
                md_ctx = OpenSSL.EVP_MD_CTX_create()
            dgst_len = OpenSSL.pointer(OpenSSL.c_int(0))
            siglen = OpenSSL.pointer(OpenSSL.c_int(0))
            sig = OpenSSL.malloc(0, 151)

            key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
            if key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")

            priv_key = OpenSSL.BN_bin2bn(self.privkey, len(self.privkey), 0)
            pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0)
            pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0)

            if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...")

            group = OpenSSL.EC_KEY_get0_group(key)
            pub_key = OpenSSL.EC_POINT_new(group)

            if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
                                                            pub_key_x,
                                                            pub_key_y,
                                                            0)) == 0:
                raise Exception(
                    "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
            if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
            if (OpenSSL.EC_KEY_check_key(key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")

            # NOTE(review): EVP_MD_CTX_new(md_ctx) with an argument looks
            # odd here (md_ctx was already created above) -- verify against
            # upstream pyelliptic before changing.
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                OpenSSL.EVP_MD_CTX_new(md_ctx)
            else:
                OpenSSL.EVP_MD_CTX_init(md_ctx)
            OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None)

            if (OpenSSL.EVP_DigestUpdate(md_ctx, buff, size)) == 0:
                raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...")
            OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len)
            OpenSSL.ECDSA_sign(0, digest, dgst_len.contents, sig, siglen, key)
            # Sanity check: a signature we just produced must verify.
            if (OpenSSL.ECDSA_verify(0, digest, dgst_len.contents, sig,
                                     siglen.contents, key)) != 1:
                raise Exception("[OpenSSL] ECDSA_verify FAIL ...")

            return sig.raw[:siglen.contents.value]

        finally:
            OpenSSL.EC_KEY_free(key)
            OpenSSL.BN_free(pub_key_x)
            OpenSSL.BN_free(pub_key_y)
            OpenSSL.BN_free(priv_key)
            OpenSSL.EC_POINT_free(pub_key)
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                OpenSSL.EVP_MD_CTX_free(md_ctx)
            else:
                OpenSSL.EVP_MD_CTX_destroy(md_ctx)

    def verify(self, sig, inputb, digest_alg=OpenSSL.digest_ecdsa_sha1):
        """
        Verify the signature with the input and the local public key.
        Returns a boolean
        """
        try:
            bsig = OpenSSL.malloc(sig, len(sig))
            binputb = OpenSSL.malloc(inputb, len(inputb))
            digest = OpenSSL.malloc(0, 64)
            dgst_len = OpenSSL.pointer(OpenSSL.c_int(0))
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                md_ctx = OpenSSL.EVP_MD_CTX_new()
            else:
                md_ctx = OpenSSL.EVP_MD_CTX_create()
            key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)

            if key == 0:
                raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")

            pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0)
            pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0)
            group = OpenSSL.EC_KEY_get0_group(key)
            pub_key = OpenSSL.EC_POINT_new(group)

            if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
                                                            pub_key_x,
                                                            pub_key_y,
                                                            0)) == 0:
                raise Exception(
                    "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
            if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
            if (OpenSSL.EC_KEY_check_key(key)) == 0:
                raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                OpenSSL.EVP_MD_CTX_new(md_ctx)
            else:
                OpenSSL.EVP_MD_CTX_init(md_ctx)
            OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None)
            if (OpenSSL.EVP_DigestUpdate(md_ctx, binputb, len(inputb))) == 0:
                raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...")

            OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len)
            ret = OpenSSL.ECDSA_verify(
                0, digest, dgst_len.contents, bsig, len(sig), key)

            if ret == -1:
                # Fail to Check
                return False
            if ret == 0:
                # Bad signature !
                return False
            # Good
            return True

        finally:
            OpenSSL.EC_KEY_free(key)
            OpenSSL.BN_free(pub_key_x)
            OpenSSL.BN_free(pub_key_y)
            OpenSSL.EC_POINT_free(pub_key)
            if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL:
                OpenSSL.EVP_MD_CTX_free(md_ctx)
            else:
                OpenSSL.EVP_MD_CTX_destroy(md_ctx)

    @staticmethod
    def encrypt(data, pubkey, ephemcurve=None, ciphername='aes-256-cbc'):
        """
        Encrypt data with ECIES method using the public key of the recipient.
        """
        curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey)
        return ECC.raw_encrypt(data, pubkey_x, pubkey_y, curve=curve,
                               ephemcurve=ephemcurve, ciphername=ciphername)

    @staticmethod
    def raw_encrypt(
            data,
            pubkey_x,
            pubkey_y,
            curve='sect283r1',
            ephemcurve=None,
            ciphername='aes-256-cbc',
    ):  # pylint: disable=too-many-arguments
        """ECHD encryption, keys supplied in binary data format"""

        # Output layout: iv + ephemeral pubkey + ciphertext + hmac(32).
        if ephemcurve is None:
            ephemcurve = curve
        ephem = ECC(curve=ephemcurve)
        key = sha512(ephem.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
        key_e, key_m = key[:32], key[32:]
        pubkey = ephem.get_pubkey()
        iv = OpenSSL.rand(OpenSSL.get_cipher(ciphername).get_blocksize())
        ctx = Cipher(key_e, iv, 1, ciphername)
        ciphertext = iv + pubkey + ctx.ciphering(data)
        mac = hmac_sha256(key_m, ciphertext)
        return ciphertext + mac

    def decrypt(self, data, ciphername='aes-256-cbc'):
        """
        Decrypt data with ECIES method using the local private key
        """
        blocksize = OpenSSL.get_cipher(ciphername).get_blocksize()
        iv = data[:blocksize]
        i = blocksize
        _, pubkey_x, pubkey_y, i2 = ECC._decode_pubkey(data[i:])
        i += i2
        ciphertext = data[i:len(data) - 32]
        i += len(ciphertext)
        mac = data[i:]
        key = sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
        key_e, key_m = key[:32], key[32:]
        # Authenticate before decrypting (encrypt-then-MAC).
        if not equals(hmac_sha256(key_m, data[:len(data) - 32]), mac):
            raise RuntimeError("Fail to verify data")
        ctx = Cipher(key_e, iv, 0, ciphername)
        return ctx.ciphering(ciphertext)
diff --git a/src/tests/mock/pybitmessage/pyelliptic/eccblind.py b/src/tests/mock/pybitmessage/pyelliptic/eccblind.py new file mode 100644 index 00000000..83bc7632 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/eccblind.py @@ -0,0 +1,373 @@ +""" +ECC blind signature functionality based on +"An Efficient Blind Signature Scheme +Based on the Elliptic CurveDiscrete Logarithm Problem" by Morteza Nikooghadama + and Ali Zakerolhosseini , +http://www.isecure-journal.com/article_39171_47f9ec605dd3918c2793565ec21fcd7a.pdf +""" + +# variable names are based on the math in the paper, so they don't conform +# to PEP8 + +import time +from hashlib import sha256 +from struct import pack, unpack + +from .openssl import OpenSSL + +# first byte in serialisation can contain data +Y_BIT = 0x01 +COMPRESSED_BIT = 0x02 + +# formats +BIGNUM = '!32s' +EC = '!B32s' +PUBKEY = '!BB33s' + + +class Expiration(object): + """Expiration of pubkey""" + @staticmethod + def deserialize(val): + """Create an object out of int""" + year = ((val & 0xF0) >> 4) + 2020 + month = val & 0x0F + assert month < 12 + return Expiration(year, month) + + def __init__(self, year, month): + assert isinstance(year, int) + assert year > 2019 and year < 2036 + assert isinstance(month, int) + assert month < 12 + self.year = year + self.month = month + self.exp = year + month / 12.0 + + def serialize(self): + """Make int out of object""" + return ((self.year - 2020) << 4) + self.month + + def verify(self): + """Check if the pubkey has expired""" + now = time.gmtime() + return self.exp >= now.tm_year + (now.tm_mon - 1) / 12.0 + + +class Value(object): + """Value of a pubkey""" + @staticmethod + def deserialize(val): + """Make object out of int""" + return Value(val) + + def __init__(self, value=0xFF): + assert isinstance(value, int) + self.value = value + + def serialize(self): + """Make int out of object""" + return self.value & 0xFF + + def verify(self, value): + """Verify against supplied value""" + return 
value <= self.value + + +class ECCBlind(object): # pylint: disable=too-many-instance-attributes + """ + Class for ECC blind signature functionality + """ + + # init + k = None + R = None + F = None + d = None + Q = None + a = None + b = None + c = None + binv = None + r = None + m = None + m_ = None + s_ = None + signature = None + exp = None + val = None + + def ec_get_random(self): + """ + Random integer within the EC order + """ + randomnum = OpenSSL.BN_new() + OpenSSL.BN_rand(randomnum, OpenSSL.BN_num_bits(self.n), 0, 0) + return randomnum + + def ec_invert(self, a): + """ + ECC inversion + """ + inverse = OpenSSL.BN_mod_inverse(0, a, self.n, self.ctx) + return inverse + + def ec_gen_keypair(self): + """ + Generate an ECC keypair + We're using compressed keys + """ + d = self.ec_get_random() + Q = OpenSSL.EC_POINT_new(self.group) + OpenSSL.EC_POINT_mul(self.group, Q, d, 0, 0, 0) + return (d, Q) + + def ec_Ftor(self, F): + """ + x0 coordinate of F + """ + # F = (x0, y0) + x0 = OpenSSL.BN_new() + y0 = OpenSSL.BN_new() + OpenSSL.EC_POINT_get_affine_coordinates(self.group, F, x0, y0, self.ctx) + OpenSSL.BN_free(y0) + return x0 + + def _ec_point_serialize(self, point): + """Make an EC point into a string""" + try: + x = OpenSSL.BN_new() + y = OpenSSL.BN_new() + OpenSSL.EC_POINT_get_affine_coordinates( + self.group, point, x, y, 0) + y_byte = (OpenSSL.BN_is_odd(y) & Y_BIT) | COMPRESSED_BIT + l_ = OpenSSL.BN_num_bytes(self.n) + try: + bx = OpenSSL.malloc(0, l_) + OpenSSL.BN_bn2binpad(x, bx, l_) + out = bx.raw + except AttributeError: + # padding manually + bx = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(x)) + OpenSSL.BN_bn2bin(x, bx) + out = bx.raw.rjust(l_, b'\x00') + return pack(EC, y_byte, out) + + finally: + OpenSSL.BN_clear_free(x) + OpenSSL.BN_clear_free(y) + + def _ec_point_deserialize(self, data): + """Make a string into an EC point""" + y_bit, x_raw = unpack(EC, data) + x = OpenSSL.BN_bin2bn(x_raw, OpenSSL.BN_num_bytes(self.n), 0) + y_bit &= Y_BIT + retval = 
OpenSSL.EC_POINT_new(self.group) + OpenSSL.EC_POINT_set_compressed_coordinates(self.group, + retval, + x, + y_bit, + self.ctx) + return retval + + def _bn_serialize(self, bn): + """Make a string out of BigNum""" + l_ = OpenSSL.BN_num_bytes(self.n) + try: + o = OpenSSL.malloc(0, l_) + OpenSSL.BN_bn2binpad(bn, o, l_) + return o.raw + except AttributeError: + o = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(bn)) + OpenSSL.BN_bn2bin(bn, o) + return o.raw.rjust(l_, b'\x00') + + def _bn_deserialize(self, data): + """Make a BigNum out of string""" + x = OpenSSL.BN_bin2bn(data, OpenSSL.BN_num_bytes(self.n), 0) + return x + + def _init_privkey(self, privkey): + """Initialise private key out of string/bytes""" + self.d = self._bn_deserialize(privkey) + + def privkey(self): + """Make a private key into a string""" + return pack(BIGNUM, self.d) + + def _init_pubkey(self, pubkey): + """Initialise pubkey out of string/bytes""" + unpacked = unpack(PUBKEY, pubkey) + self.expiration = Expiration.deserialize(unpacked[0]) + self.value = Value.deserialize(unpacked[1]) + self.Q = self._ec_point_deserialize(unpacked[2]) + + def pubkey(self): + """Make a pubkey into a string""" + return pack(PUBKEY, self.expiration.serialize(), + self.value.serialize(), + self._ec_point_serialize(self.Q)) + + def __init__(self, curve="secp256k1", pubkey=None, privkey=None, # pylint: disable=too-many-arguments + year=2025, month=11, value=0xFF): + self.ctx = OpenSSL.BN_CTX_new() + + # ECC group + self.group = OpenSSL.EC_GROUP_new_by_curve_name( + OpenSSL.get_curve(curve)) + + # Order n + self.n = OpenSSL.BN_new() + OpenSSL.EC_GROUP_get_order(self.group, self.n, self.ctx) + + # Generator G + self.G = OpenSSL.EC_GROUP_get0_generator(self.group) + + # Identity O (infinity) + self.iO = OpenSSL.EC_POINT_new(self.group) + OpenSSL.EC_POINT_set_to_infinity(self.group, self.iO) + + if privkey: + assert pubkey + # load both pubkey and privkey from bytes + self._init_privkey(privkey) + self._init_pubkey(pubkey) + elif 
pubkey: + # load pubkey from bytes + self._init_pubkey(pubkey) + else: + # new keypair + self.d, self.Q = self.ec_gen_keypair() + if not year or not month: + now = time.gmtime() + if now.tm_mon == 12: + self.expiration = Expiration(now.tm_year + 1, 1) + else: + self.expiration = Expiration(now.tm_year, now.tm_mon + 1) + else: + self.expiration = Expiration(year, month) + self.value = Value(value) + + def __del__(self): + OpenSSL.BN_free(self.n) + OpenSSL.BN_CTX_free(self.ctx) + + def signer_init(self): + """ + Init signer + """ + # Signer: Random integer k + self.k = self.ec_get_random() + + # R = kG + self.R = OpenSSL.EC_POINT_new(self.group) + OpenSSL.EC_POINT_mul(self.group, self.R, self.k, 0, 0, 0) + + return self._ec_point_serialize(self.R) + + def create_signing_request(self, R, msg): + """ + Requester creates a new signing request + """ + self.R = self._ec_point_deserialize(R) + msghash = sha256(msg).digest() + + # Requester: 3 random blinding factors + self.F = OpenSSL.EC_POINT_new(self.group) + OpenSSL.EC_POINT_set_to_infinity(self.group, self.F) + temp = OpenSSL.EC_POINT_new(self.group) + abinv = OpenSSL.BN_new() + + # F != O + while OpenSSL.EC_POINT_cmp(self.group, self.F, self.iO, self.ctx) == 0: + self.a = self.ec_get_random() + self.b = self.ec_get_random() + self.c = self.ec_get_random() + + # F = b^-1 * R... + self.binv = self.ec_invert(self.b) + OpenSSL.EC_POINT_mul(self.group, temp, 0, self.R, self.binv, 0) + OpenSSL.EC_POINT_copy(self.F, temp) + + # ... + a*b^-1 * Q... + OpenSSL.BN_mul(abinv, self.a, self.binv, self.ctx) + OpenSSL.EC_POINT_mul(self.group, temp, 0, self.Q, abinv, 0) + OpenSSL.EC_POINT_add(self.group, self.F, self.F, temp, 0) + + # ... 
+ c*G + OpenSSL.EC_POINT_mul(self.group, temp, 0, self.G, self.c, 0) + OpenSSL.EC_POINT_add(self.group, self.F, self.F, temp, 0) + + # F = (x0, y0) + self.r = self.ec_Ftor(self.F) + + # Requester: Blinding (m' = br(m) + a) + self.m = OpenSSL.BN_new() + OpenSSL.BN_bin2bn(msghash, len(msghash), self.m) + + self.m_ = OpenSSL.BN_new() + OpenSSL.BN_mod_mul(self.m_, self.b, self.r, self.n, self.ctx) + OpenSSL.BN_mod_mul(self.m_, self.m_, self.m, self.n, self.ctx) + OpenSSL.BN_mod_add(self.m_, self.m_, self.a, self.n, self.ctx) + return self._bn_serialize(self.m_) + + def blind_sign(self, m_): + """ + Signer blind-signs the request + """ + self.m_ = self._bn_deserialize(m_) + self.s_ = OpenSSL.BN_new() + OpenSSL.BN_mod_mul(self.s_, self.d, self.m_, self.n, self.ctx) + OpenSSL.BN_mod_add(self.s_, self.s_, self.k, self.n, self.ctx) + OpenSSL.BN_free(self.k) + return self._bn_serialize(self.s_) + + def unblind(self, s_): + """ + Requester unblinds the signature + """ + self.s_ = self._bn_deserialize(s_) + s = OpenSSL.BN_new() + OpenSSL.BN_mod_mul(s, self.binv, self.s_, self.n, self.ctx) + OpenSSL.BN_mod_add(s, s, self.c, self.n, self.ctx) + OpenSSL.BN_free(self.a) + OpenSSL.BN_free(self.b) + OpenSSL.BN_free(self.c) + self.signature = (s, self.F) + return self._bn_serialize(s) + self._ec_point_serialize(self.F) + + def verify(self, msg, signature, value=1): + """ + Verify signature with certifier's pubkey + """ + + # convert msg to BIGNUM + self.m = OpenSSL.BN_new() + msghash = sha256(msg).digest() + OpenSSL.BN_bin2bn(msghash, len(msghash), self.m) + + # init + s, self.F = (self._bn_deserialize(signature[0:32]), + self._ec_point_deserialize(signature[32:])) + if self.r is None: + self.r = self.ec_Ftor(self.F) + + lhs = OpenSSL.EC_POINT_new(self.group) + rhs = OpenSSL.EC_POINT_new(self.group) + + OpenSSL.EC_POINT_mul(self.group, lhs, s, 0, 0, 0) + + OpenSSL.EC_POINT_mul(self.group, rhs, 0, self.Q, self.m, 0) + OpenSSL.EC_POINT_mul(self.group, rhs, 0, rhs, self.r, 0) + 
OpenSSL.EC_POINT_add(self.group, rhs, rhs, self.F, self.ctx) + + retval = OpenSSL.EC_POINT_cmp(self.group, lhs, rhs, self.ctx) + if retval == -1: + raise RuntimeError("EC_POINT_cmp returned an error") + elif not self.value.verify(value): + return False + elif not self.expiration.verify(): + return False + elif retval != 0: + return False + return True diff --git a/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py b/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py new file mode 100644 index 00000000..56e8ce2a --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py @@ -0,0 +1,52 @@ +""" +Blind signature chain with a top level CA +""" + +from .eccblind import ECCBlind + + +class ECCBlindChain(object): # pylint: disable=too-few-public-methods + """ + # Class for ECC Blind Chain signature functionality + """ + + def __init__(self, ca=None, chain=None): + self.chain = [] + self.ca = [] + if ca: + for i in range(0, len(ca), 35): + self.ca.append(ca[i:i + 35]) + if chain: + self.chain.append(chain[0:35]) + for i in range(35, len(chain), 100): + if len(chain[i:]) == 65: + self.chain.append(chain[i:i + 65]) + else: + self.chain.append(chain[i:i + 100]) + + def verify(self, msg, value): + """Verify a chain provides supplied message and value""" + parent = None + l_ = 0 + for level in self.chain: + l_ += 1 + pubkey = None + signature = None + if len(level) == 100: + pubkey, signature = (level[0:35], level[35:]) + elif len(level) == 35: + if level not in self.ca: + return False + parent = level + continue + else: + signature = level + verifier_obj = ECCBlind(pubkey=parent) + if pubkey: + if not verifier_obj.verify(pubkey, signature, value): + return False + parent = pubkey + else: + return verifier_obj.verify(msg=msg, signature=signature, + value=value) + return None diff --git a/src/tests/mock/pybitmessage/pyelliptic/hash.py b/src/tests/mock/pybitmessage/pyelliptic/hash.py new file mode 100644 index 00000000..70c9a6ce --- /dev/null +++ 
b/src/tests/mock/pybitmessage/pyelliptic/hash.py @@ -0,0 +1,70 @@ +""" +Wrappers for hash functions from OpenSSL. +""" +# Copyright (C) 2011 Yann GUIBET +# See LICENSE for details. + +from .openssl import OpenSSL + + +# For python3 +def _equals_bytes(a, b): + if len(a) != len(b): + return False + result = 0 + for x, y in zip(a, b): + result |= x ^ y + return result == 0 + + +def _equals_str(a, b): + if len(a) != len(b): + return False + result = 0 + for x, y in zip(a, b): + result |= ord(x) ^ ord(y) + return result == 0 + + +def equals(a, b): + """Compare two strings or bytearrays""" + if isinstance(a, str): + return _equals_str(a, b) + return _equals_bytes(a, b) + + +def hmac_sha256(k, m): + """ + Compute the key and the message with HMAC SHA5256 + """ + key = OpenSSL.malloc(k, len(k)) + d = OpenSSL.malloc(m, len(m)) + md = OpenSSL.malloc(0, 32) + i = OpenSSL.pointer(OpenSSL.c_int(0)) + OpenSSL.HMAC(OpenSSL.EVP_sha256(), key, len(k), d, len(m), md, i) + return md.raw + + +def hmac_sha512(k, m): + """ + Compute the key and the message with HMAC SHA512 + """ + key = OpenSSL.malloc(k, len(k)) + d = OpenSSL.malloc(m, len(m)) + md = OpenSSL.malloc(0, 64) + i = OpenSSL.pointer(OpenSSL.c_int(0)) + OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i) + return md.raw + + +def pbkdf2(password, salt=None, i=10000, keylen=64): + """Key derivation function using SHA256""" + if salt is None: + salt = OpenSSL.rand(8) + p_password = OpenSSL.malloc(password, len(password)) + p_salt = OpenSSL.malloc(salt, len(salt)) + output = OpenSSL.malloc(0, keylen) + OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt, + len(p_salt), i, OpenSSL.EVP_sha256(), + keylen, output) + return salt, output.raw diff --git a/src/tests/mock/pybitmessage/pyelliptic/openssl.py b/src/tests/mock/pybitmessage/pyelliptic/openssl.py new file mode 100644 index 00000000..abc6ac13 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/openssl.py @@ -0,0 +1,803 @@ +# Copyright (C) 2011 Yann 
GUIBET +# See LICENSE for details. +# +# Software slightly changed by Jonathan Warren +""" +This module loads openssl libs with ctypes and incapsulates +needed openssl functionality in class _OpenSSL. +""" +import ctypes +import sys + +# pylint: disable=protected-access + +OpenSSL = None + + +class CipherName(object): + """Class returns cipher name, pointer and blocksize""" + + def __init__(self, name, pointer, blocksize): + self._name = name + self._pointer = pointer + self._blocksize = blocksize + + def __str__(self): + return "Cipher : " + self._name + \ + " | Blocksize : " + str(self._blocksize) + \ + " | Function pointer : " + str(self._pointer) + + def get_pointer(self): + """This method returns cipher pointer""" + return self._pointer() + + def get_name(self): + """This method returns cipher name""" + return self._name + + def get_blocksize(self): + """This method returns cipher blocksize""" + return self._blocksize + + +def get_version(library): + """This function return version, hexversion and cflages""" + version = None + hexversion = None + cflags = None + try: + # OpenSSL 1.1 + OPENSSL_VERSION = 0 + OPENSSL_CFLAGS = 1 + library.OpenSSL_version.argtypes = [ctypes.c_int] + library.OpenSSL_version.restype = ctypes.c_char_p + version = library.OpenSSL_version(OPENSSL_VERSION) + cflags = library.OpenSSL_version(OPENSSL_CFLAGS) + library.OpenSSL_version_num.restype = ctypes.c_long + hexversion = library.OpenSSL_version_num() + except AttributeError: + try: + # OpenSSL 1.0 + SSLEAY_VERSION = 0 + SSLEAY_CFLAGS = 2 + library.SSLeay.restype = ctypes.c_long + library.SSLeay_version.restype = ctypes.c_char_p + library.SSLeay_version.argtypes = [ctypes.c_int] + version = library.SSLeay_version(SSLEAY_VERSION) + cflags = library.SSLeay_version(SSLEAY_CFLAGS) + hexversion = library.SSLeay() + except AttributeError: + # raise NotImplementedError('Cannot determine version of this OpenSSL library.') + pass + return (version, hexversion, cflags) + + +class 
_OpenSSL(object): + """ + Wrapper for OpenSSL using ctypes + """ + # pylint: disable=too-many-statements, too-many-instance-attributes + def __init__(self, library): + """ + Build the wrapper + """ + self._lib = ctypes.CDLL(library) + self._version, self._hexversion, self._cflags = get_version(self._lib) + self._libreSSL = self._version.startswith(b"LibreSSL") + + self.pointer = ctypes.pointer + self.c_int = ctypes.c_int + self.byref = ctypes.byref + self.create_string_buffer = ctypes.create_string_buffer + + self.BN_new = self._lib.BN_new + self.BN_new.restype = ctypes.c_void_p + self.BN_new.argtypes = [] + + self.BN_free = self._lib.BN_free + self.BN_free.restype = None + self.BN_free.argtypes = [ctypes.c_void_p] + + self.BN_clear_free = self._lib.BN_clear_free + self.BN_clear_free.restype = None + self.BN_clear_free.argtypes = [ctypes.c_void_p] + + self.BN_num_bits = self._lib.BN_num_bits + self.BN_num_bits.restype = ctypes.c_int + self.BN_num_bits.argtypes = [ctypes.c_void_p] + + self.BN_bn2bin = self._lib.BN_bn2bin + self.BN_bn2bin.restype = ctypes.c_int + self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + try: + self.BN_bn2binpad = self._lib.BN_bn2binpad + self.BN_bn2binpad.restype = ctypes.c_int + self.BN_bn2binpad.argtypes = [ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_int] + except AttributeError: + # optional, we have a workaround + pass + + self.BN_bin2bn = self._lib.BN_bin2bn + self.BN_bin2bn.restype = ctypes.c_void_p + self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p] + + self.EC_KEY_free = self._lib.EC_KEY_free + self.EC_KEY_free.restype = None + self.EC_KEY_free.argtypes = [ctypes.c_void_p] + + self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name + self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p + self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] + + self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key + self.EC_KEY_generate_key.restype = ctypes.c_int + 
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_check_key = self._lib.EC_KEY_check_key + self.EC_KEY_check_key.restype = ctypes.c_int + self.EC_KEY_check_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key + self.EC_KEY_get0_private_key.restype = ctypes.c_void_p + self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key + self.EC_KEY_get0_public_key.restype = ctypes.c_void_p + self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] + + self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group + self.EC_KEY_get0_group.restype = ctypes.c_void_p + self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] + + self.EC_POINT_get_affine_coordinates_GFp = \ + self._lib.EC_POINT_get_affine_coordinates_GFp + self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int + self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_get_affine_coordinates = \ + self._lib.EC_POINT_get_affine_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_get_affine_coordinates = \ + self._lib.EC_POINT_get_affine_coordinates_GF2m + self.EC_POINT_get_affine_coordinates.restype = ctypes.c_int + self.EC_POINT_get_affine_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key + self.EC_KEY_set_private_key.restype = ctypes.c_int + self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key + self.EC_KEY_set_public_key.restype = ctypes.c_int + self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_group = self._lib.EC_KEY_set_group + 
self.EC_KEY_set_group.restype = ctypes.c_int + self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_set_affine_coordinates_GFp = \ + self._lib.EC_POINT_set_affine_coordinates_GFp + self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int + self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_set_affine_coordinates = \ + self._lib.EC_POINT_set_affine_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_set_affine_coordinates = \ + self._lib.EC_POINT_set_affine_coordinates_GF2m + self.EC_POINT_set_affine_coordinates.restype = ctypes.c_int + self.EC_POINT_set_affine_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.EC_POINT_set_compressed_coordinates = \ + self._lib.EC_POINT_set_compressed_coordinates + except AttributeError: + # OpenSSL docs say only use this for backwards compatibility + self.EC_POINT_set_compressed_coordinates = \ + self._lib.EC_POINT_set_compressed_coordinates_GF2m + self.EC_POINT_set_compressed_coordinates.restype = ctypes.c_int + self.EC_POINT_set_compressed_coordinates.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p] + + self.EC_POINT_new = self._lib.EC_POINT_new + self.EC_POINT_new.restype = ctypes.c_void_p + self.EC_POINT_new.argtypes = [ctypes.c_void_p] + + self.EC_POINT_free = self._lib.EC_POINT_free + self.EC_POINT_free.restype = None + self.EC_POINT_free.argtypes = [ctypes.c_void_p] + + self.BN_CTX_free = self._lib.BN_CTX_free + self.BN_CTX_free.restype = None + self.BN_CTX_free.argtypes = [ctypes.c_void_p] + + self.EC_POINT_mul = self._lib.EC_POINT_mul + self.EC_POINT_mul.restype = None + self.EC_POINT_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + 
ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key + self.EC_KEY_set_private_key.restype = ctypes.c_int + self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EC_KEY_OpenSSL = self._lib.EC_KEY_OpenSSL + self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p + self._lib.EC_KEY_OpenSSL.argtypes = [] + + self.EC_KEY_set_method = self._lib.EC_KEY_set_method + self._lib.EC_KEY_set_method.restype = ctypes.c_int + self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + else: + self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL + self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p + self._lib.ECDH_OpenSSL.argtypes = [] + + self.ECDH_set_method = self._lib.ECDH_set_method + self._lib.ECDH_set_method.restype = ctypes.c_int + self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.ECDH_compute_key = self._lib.ECDH_compute_key + self.ECDH_compute_key.restype = ctypes.c_int + self.ECDH_compute_key.argtypes = [ctypes.c_void_p, + ctypes.c_int, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex + self.EVP_CipherInit_ex.restype = ctypes.c_int + self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new + self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p + self.EVP_CIPHER_CTX_new.argtypes = [] + + # Cipher + self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128 + self.EVP_aes_128_cfb128.restype = ctypes.c_void_p + self.EVP_aes_128_cfb128.argtypes = [] + + self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128 + self.EVP_aes_256_cfb128.restype = ctypes.c_void_p + self.EVP_aes_256_cfb128.argtypes = [] + + self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc + self.EVP_aes_128_cbc.restype = ctypes.c_void_p + self.EVP_aes_128_cbc.argtypes = [] + + self.EVP_aes_256_cbc = 
self._lib.EVP_aes_256_cbc + self.EVP_aes_256_cbc.restype = ctypes.c_void_p + self.EVP_aes_256_cbc.argtypes = [] + + # self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr + # self.EVP_aes_128_ctr.restype = ctypes.c_void_p + # self.EVP_aes_128_ctr.argtypes = [] + + # self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr + # self.EVP_aes_256_ctr.restype = ctypes.c_void_p + # self.EVP_aes_256_ctr.argtypes = [] + + self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb + self.EVP_aes_128_ofb.restype = ctypes.c_void_p + self.EVP_aes_128_ofb.argtypes = [] + + self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb + self.EVP_aes_256_ofb.restype = ctypes.c_void_p + self.EVP_aes_256_ofb.argtypes = [] + + self.EVP_bf_cbc = self._lib.EVP_bf_cbc + self.EVP_bf_cbc.restype = ctypes.c_void_p + self.EVP_bf_cbc.argtypes = [] + + self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64 + self.EVP_bf_cfb64.restype = ctypes.c_void_p + self.EVP_bf_cfb64.argtypes = [] + + self.EVP_rc4 = self._lib.EVP_rc4 + self.EVP_rc4.restype = ctypes.c_void_p + self.EVP_rc4.argtypes = [] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EVP_CIPHER_CTX_reset = self._lib.EVP_CIPHER_CTX_reset + self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int + self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p] + else: + self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup + self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int + self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p] + + self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free + self.EVP_CIPHER_CTX_free.restype = None + self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p] + + self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate + self.EVP_CipherUpdate.restype = ctypes.c_int + self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_int] + + self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex + self.EVP_CipherFinal_ex.restype = ctypes.c_int + self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p, + 
ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestInit = self._lib.EVP_DigestInit + self.EVP_DigestInit.restype = ctypes.c_int + self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex + self.EVP_DigestInit_ex.restype = ctypes.c_int + self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p] + + self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate + self.EVP_DigestUpdate.restype = ctypes.c_int + self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_int] + + self.EVP_DigestFinal = self._lib.EVP_DigestFinal + self.EVP_DigestFinal.restype = ctypes.c_int + self.EVP_DigestFinal.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex + self.EVP_DigestFinal_ex.restype = ctypes.c_int + self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.ECDSA_sign = self._lib.ECDSA_sign + self.ECDSA_sign.restype = ctypes.c_int + self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p] + + self.ECDSA_verify = self._lib.ECDSA_verify + self.ECDSA_verify.restype = ctypes.c_int + self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p] + + if self._hexversion >= 0x10100000 and not self._libreSSL: + self.EVP_MD_CTX_new = self._lib.EVP_MD_CTX_new + self.EVP_MD_CTX_new.restype = ctypes.c_void_p + self.EVP_MD_CTX_new.argtypes = [] + + self.EVP_MD_CTX_reset = self._lib.EVP_MD_CTX_reset + self.EVP_MD_CTX_reset.restype = None + self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p] + + self.EVP_MD_CTX_free = self._lib.EVP_MD_CTX_free + self.EVP_MD_CTX_free.restype = None + self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p] + + self.EVP_sha1 = self._lib.EVP_sha1 + self.EVP_sha1.restype = ctypes.c_void_p + self.EVP_sha1.argtypes = [] + + 
self.digest_ecdsa_sha1 = self.EVP_sha1 + else: + self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create + self.EVP_MD_CTX_create.restype = ctypes.c_void_p + self.EVP_MD_CTX_create.argtypes = [] + + self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init + self.EVP_MD_CTX_init.restype = None + self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p] + + self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy + self.EVP_MD_CTX_destroy.restype = None + self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p] + + self.EVP_ecdsa = self._lib.EVP_ecdsa + self._lib.EVP_ecdsa.restype = ctypes.c_void_p + self._lib.EVP_ecdsa.argtypes = [] + + self.digest_ecdsa_sha1 = self.EVP_ecdsa + + self.RAND_bytes = self._lib.RAND_bytes + self.RAND_bytes.restype = ctypes.c_int + self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int] + + self.EVP_sha256 = self._lib.EVP_sha256 + self.EVP_sha256.restype = ctypes.c_void_p + self.EVP_sha256.argtypes = [] + + self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey + self.i2o_ECPublicKey.restype = ctypes.c_void_p + self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + + self.EVP_sha512 = self._lib.EVP_sha512 + self.EVP_sha512.restype = ctypes.c_void_p + self.EVP_sha512.argtypes = [] + + self.HMAC = self._lib.HMAC + self.HMAC.restype = ctypes.c_void_p + self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_void_p] + + try: + self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC + except Exception: + # The above is not compatible with all versions of OSX. 
+ self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1 + + self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int + self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int, + ctypes.c_void_p, ctypes.c_int, + ctypes.c_int, ctypes.c_void_p, + ctypes.c_int, ctypes.c_void_p] + + # Blind signature requirements + self.BN_CTX_new = self._lib.BN_CTX_new + self.BN_CTX_new.restype = ctypes.c_void_p + self.BN_CTX_new.argtypes = [] + + self.BN_dup = self._lib.BN_dup + self.BN_dup.restype = ctypes.c_void_p + self.BN_dup.argtypes = [ctypes.c_void_p] + + self.BN_rand = self._lib.BN_rand + self.BN_rand.restype = ctypes.c_int + self.BN_rand.argtypes = [ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int] + + self.BN_set_word = self._lib.BN_set_word + self.BN_set_word.restype = ctypes.c_int + self.BN_set_word.argtypes = [ctypes.c_void_p, + ctypes.c_ulong] + + self.BN_mul = self._lib.BN_mul + self.BN_mul.restype = ctypes.c_int + self.BN_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_add = self._lib.BN_mod_add + self.BN_mod_add.restype = ctypes.c_int + self.BN_mod_add.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_inverse = self._lib.BN_mod_inverse + self.BN_mod_inverse.restype = ctypes.c_void_p + self.BN_mod_inverse.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_mod_mul = self._lib.BN_mod_mul + self.BN_mod_mul.restype = ctypes.c_int + self.BN_mod_mul.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.BN_lshift = self._lib.BN_lshift + self.BN_lshift.restype = ctypes.c_int + self.BN_lshift.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_int] + + self.BN_sub_word = self._lib.BN_sub_word + self.BN_sub_word.restype = ctypes.c_int + self.BN_sub_word.argtypes = [ctypes.c_void_p, + ctypes.c_ulong] + + self.BN_cmp = self._lib.BN_cmp + 
self.BN_cmp.restype = ctypes.c_int + self.BN_cmp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + try: + self.BN_is_odd = self._lib.BN_is_odd + self.BN_is_odd.restype = ctypes.c_int + self.BN_is_odd.argtypes = [ctypes.c_void_p] + except AttributeError: + # OpenSSL 1.1.0 implements this as a function, but earlier + # versions as macro, so we need to workaround + self.BN_is_odd = self.BN_is_odd_compatible + + self.BN_bn2dec = self._lib.BN_bn2dec + self.BN_bn2dec.restype = ctypes.c_char_p + self.BN_bn2dec.argtypes = [ctypes.c_void_p] + + self.EC_GROUP_new_by_curve_name = self._lib.EC_GROUP_new_by_curve_name + self.EC_GROUP_new_by_curve_name.restype = ctypes.c_void_p + self.EC_GROUP_new_by_curve_name.argtypes = [ctypes.c_int] + + self.EC_GROUP_get_order = self._lib.EC_GROUP_get_order + self.EC_GROUP_get_order.restype = ctypes.c_int + self.EC_GROUP_get_order.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_GROUP_get_cofactor = self._lib.EC_GROUP_get_cofactor + self.EC_GROUP_get_cofactor.restype = ctypes.c_int + self.EC_GROUP_get_cofactor.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_GROUP_get0_generator = self._lib.EC_GROUP_get0_generator + self.EC_GROUP_get0_generator.restype = ctypes.c_void_p + self.EC_GROUP_get0_generator.argtypes = [ctypes.c_void_p] + + self.EC_POINT_copy = self._lib.EC_POINT_copy + self.EC_POINT_copy.restype = ctypes.c_int + self.EC_POINT_copy.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_add = self._lib.EC_POINT_add + self.EC_POINT_add.restype = ctypes.c_int + self.EC_POINT_add.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_cmp = self._lib.EC_POINT_cmp + self.EC_POINT_cmp.restype = ctypes.c_int + self.EC_POINT_cmp.argtypes = [ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_void_p] + + self.EC_POINT_set_to_infinity = self._lib.EC_POINT_set_to_infinity + 
self.EC_POINT_set_to_infinity.restype = ctypes.c_int + self.EC_POINT_set_to_infinity.argtypes = [ctypes.c_void_p, + ctypes.c_void_p] + + self._set_ciphers() + self._set_curves() + + def _set_ciphers(self): + self.cipher_algo = { + 'aes-128-cbc': CipherName( + 'aes-128-cbc', self.EVP_aes_128_cbc, 16), + 'aes-256-cbc': CipherName( + 'aes-256-cbc', self.EVP_aes_256_cbc, 16), + 'aes-128-cfb': CipherName( + 'aes-128-cfb', self.EVP_aes_128_cfb128, 16), + 'aes-256-cfb': CipherName( + 'aes-256-cfb', self.EVP_aes_256_cfb128, 16), + 'aes-128-ofb': CipherName( + 'aes-128-ofb', self._lib.EVP_aes_128_ofb, 16), + 'aes-256-ofb': CipherName( + 'aes-256-ofb', self._lib.EVP_aes_256_ofb, 16), + # 'aes-128-ctr': CipherName( + # 'aes-128-ctr', self._lib.EVP_aes_128_ctr, 16), + # 'aes-256-ctr': CipherName( + # 'aes-256-ctr', self._lib.EVP_aes_256_ctr, 16), + 'bf-cfb': CipherName( + 'bf-cfb', self.EVP_bf_cfb64, 8), + 'bf-cbc': CipherName( + 'bf-cbc', self.EVP_bf_cbc, 8), + # 128 is the initialisation size not block size + 'rc4': CipherName( + 'rc4', self.EVP_rc4, 128), + } + + def _set_curves(self): + self.curves = { + 'secp112r1': 704, + 'secp112r2': 705, + 'secp128r1': 706, + 'secp128r2': 707, + 'secp160k1': 708, + 'secp160r1': 709, + 'secp160r2': 710, + 'secp192k1': 711, + 'secp224k1': 712, + 'secp224r1': 713, + 'secp256k1': 714, + 'secp384r1': 715, + 'secp521r1': 716, + 'sect113r1': 717, + 'sect113r2': 718, + 'sect131r1': 719, + 'sect131r2': 720, + 'sect163k1': 721, + 'sect163r1': 722, + 'sect163r2': 723, + 'sect193r1': 724, + 'sect193r2': 725, + 'sect233k1': 726, + 'sect233r1': 727, + 'sect239k1': 728, + 'sect283k1': 729, + 'sect283r1': 730, + 'sect409k1': 731, + 'sect409r1': 732, + 'sect571k1': 733, + 'sect571r1': 734, + } + + def BN_num_bytes(self, x): + """ + returns the length of a BN (OpenSSl API) + """ + return int((self.BN_num_bits(x) + 7) / 8) + + def BN_is_odd_compatible(self, x): + """ + returns if BN is odd + we assume big endianness, and that BN is initialised + """ + 
length = self.BN_num_bytes(x)
+        data = self.malloc(0, length)
+        OpenSSL.BN_bn2bin(x, data)
+        return ord(data[length - 1]) & 1
+
+    def get_cipher(self, name):
+        """
+        returns the OpenSSL cipher instance
+        """
+        if name not in self.cipher_algo:
+            raise Exception("Unknown cipher")
+        return self.cipher_algo[name]
+
+    def get_curve(self, name):
+        """
+        returns the id of an elliptic curve
+        """
+        if name not in self.curves:
+            raise Exception("Unknown curve")
+        return self.curves[name]
+
+    def get_curve_by_id(self, id_):
+        """
+        returns the name of an elliptic curve with its id
+        """
+        res = None
+        for i in self.curves:
+            if self.curves[i] == id_:
+                res = i
+                break
+        if res is None:
+            raise Exception("Unknown curve")
+        return res
+
+    def rand(self, size):
+        """
+        OpenSSL random function
+        """
+        buffer_ = self.malloc(0, size)
+        # This pyelliptic library, by default, didn't check the return value
+        # of RAND_bytes. It is evidently possible that it returned an error
+        # and not-actually-random data. However, in tests on various
+        # operating systems, while generating hundreds of gigabytes of random
+        # strings of various sizes I could not get an error to occur.
+        # Also Bitcoin doesn't check the return value of RAND_bytes either.
+ # Fixed in Bitmessage version 0.4.2 (in source code on 2013-10-13) + while self.RAND_bytes(buffer_, size) != 1: + import time + time.sleep(1) + return buffer_.raw + + def malloc(self, data, size): + """ + returns a create_string_buffer (ctypes) + """ + buffer_ = None + if data != 0: + if sys.version_info.major == 3 and isinstance(data, type('')): + data = data.encode() + buffer_ = self.create_string_buffer(data, size) + else: + buffer_ = self.create_string_buffer(size) + return buffer_ + + +def loadOpenSSL(): + """This function finds and load the OpenSSL library""" + # pylint: disable=global-statement + global OpenSSL + from os import path, environ + from ctypes.util import find_library + + libdir = [] + if getattr(sys, 'frozen', None): + if 'darwin' in sys.platform: + libdir.extend([ + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.1.0.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.2.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.1.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.1.0.0.dylib'), + path.join( + environ['RESOURCEPATH'], '..', + 'Frameworks', 'libcrypto.0.9.8.dylib'), + ]) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append(path.join(sys._MEIPASS, 'libeay32.dll')) + else: + libdir.extend([ + path.join(sys._MEIPASS, 'libcrypto.so'), + path.join(sys._MEIPASS, 'libssl.so'), + path.join(sys._MEIPASS, 'libcrypto.so.1.1.0'), + path.join(sys._MEIPASS, 'libssl.so.1.1.0'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.2'), + path.join(sys._MEIPASS, 'libssl.so.1.0.2'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.1'), + path.join(sys._MEIPASS, 'libssl.so.1.0.1'), + path.join(sys._MEIPASS, 'libcrypto.so.1.0.0'), + path.join(sys._MEIPASS, 'libssl.so.1.0.0'), + path.join(sys._MEIPASS, 'libcrypto.so.0.9.8'), + 
path.join(sys._MEIPASS, 'libssl.so.0.9.8'), + ]) + if 'darwin' in sys.platform: + libdir.extend([ + 'libcrypto.dylib', '/usr/local/opt/openssl/lib/libcrypto.dylib']) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append('libeay32.dll') + else: + libdir.append('libcrypto.so') + libdir.append('libssl.so') + libdir.append('libcrypto.so.1.0.0') + libdir.append('libssl.so.1.0.0') + if 'linux' in sys.platform or 'darwin' in sys.platform \ + or 'bsd' in sys.platform: + libdir.append(find_library('ssl')) + elif 'win32' in sys.platform or 'win64' in sys.platform: + libdir.append(find_library('libeay32')) + for library in libdir: + try: + OpenSSL = _OpenSSL(library) + return + except Exception: + pass + raise Exception( + "Couldn't find and load the OpenSSL library. You must install it.") + + +loadOpenSSL() diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py b/src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py new file mode 100644 index 00000000..7b5c59b1 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py @@ -0,0 +1,84 @@ +""" +Test the arithmetic functions +""" + +from binascii import unhexlify +import unittest + +try: + from pyelliptic import arithmetic +except ImportError: + from pybitmessage.pyelliptic import arithmetic + + +# These keys are from addresses test script +sample_pubsigningkey = ( + b'044a367f049ec16cb6b6118eb734a9962d10b8db59c890cd08f210c43ff08bdf09d' + b'16f502ca26cd0713f38988a1237f1fc8fa07b15653c996dc4013af6d15505ce') +sample_pubencryptionkey = ( + b'044597d59177fc1d89555d38915f581b5ff2286b39d022ca0283d2bdd5c36be5d3c' + b'e7b9b97792327851a562752e4b79475d1f51f5a71352482b241227f45ed36a9') +sample_privsigningkey = \ + b'93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665' 
+sample_privencryptionkey = \ + b'4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a' + +sample_factor = \ + 66858749573256452658262553961707680376751171096153613379801854825275240965733 +# G * sample_factor +sample_point = ( + 33567437183004486938355437500683826356288335339807546987348409590129959362313, + 94730058721143827257669456336351159718085716196507891067256111928318063085006 +) + + +class TestArithmetic(unittest.TestCase): + """Test arithmetic functions""" + def test_base10_multiply(self): + """Test arithmetic.base10_multiply""" + self.assertEqual( + sample_point, + arithmetic.base10_multiply(arithmetic.G, sample_factor)) + + def test_decode(self): + """Decode sample privsigningkey from hex to int and compare to factor""" + self.assertEqual( + arithmetic.decode(sample_privsigningkey, 16), sample_factor) + + def test_encode(self): + """Encode sample factor into hex and compare to privsigningkey""" + self.assertEqual( + arithmetic.encode(sample_factor, 16), sample_privsigningkey) + + def test_changebase(self): + """Check the results of changebase()""" + self.assertEqual( + arithmetic.changebase(sample_privsigningkey, 16, 256, minlen=32), + unhexlify(sample_privsigningkey)) + self.assertEqual( + arithmetic.changebase(sample_pubsigningkey, 16, 256, minlen=64), + unhexlify(sample_pubsigningkey)) + self.assertEqual( + 32, # padding + len(arithmetic.changebase(sample_privsigningkey[:5], 16, 256, 32))) + + def test_hex_to_point(self): + """Check that sample_pubsigningkey is sample_point encoded in hex""" + self.assertEqual( + arithmetic.hex_to_point(sample_pubsigningkey), sample_point) + + def test_point_to_hex(self): + """Check that sample_point is sample_pubsigningkey decoded from hex""" + self.assertEqual( + arithmetic.point_to_hex(sample_point), sample_pubsigningkey) + + def test_privtopub(self): + """Generate public keys and check the result""" + self.assertEqual( + arithmetic.privtopub(sample_privsigningkey), + sample_pubsigningkey + ) + 
self.assertEqual( + arithmetic.privtopub(sample_privencryptionkey), + sample_pubencryptionkey + ) diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py new file mode 100644 index 00000000..9ed72081 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py @@ -0,0 +1,277 @@ +""" +Test for ECC blind signatures +""" +import os +import unittest +from hashlib import sha256 + +try: + from pyelliptic import ECCBlind, ECCBlindChain, OpenSSL +except ImportError: + from pybitmessage.pyelliptic import ECCBlind, ECCBlindChain, OpenSSL + +# pylint: disable=protected-access + + +class TestBlindSig(unittest.TestCase): + """ + Test case for ECC blind signature + """ + def test_blind_sig(self): + """Test full sequence using a random certifier key and a random message""" + # See page 127 of the paper + # (1) Initialization + signer_obj = ECCBlind() + point_r = signer_obj.signer_init() + self.assertEqual(len(signer_obj.pubkey()), 35) + + # (2) Request + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + # only 64 byte messages are planned to be used in Bitmessage + msg = os.urandom(64) + msg_blinded = requester_obj.create_signing_request(point_r, msg) + self.assertEqual(len(msg_blinded), 32) + + # check + self.assertNotEqual(sha256(msg).digest(), msg_blinded) + + # (3) Signature Generation + signature_blinded = signer_obj.blind_sign(msg_blinded) + assert isinstance(signature_blinded, bytes) + self.assertEqual(len(signature_blinded), 32) + + # (4) Extraction + signature = requester_obj.unblind(signature_blinded) + assert isinstance(signature, bytes) + self.assertEqual(len(signature), 65) + + self.assertNotEqual(signature, signature_blinded) + + # (5) Verification + verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) + self.assertTrue(verifier_obj.verify(msg, signature)) + + def test_is_odd(self): + """Test our implementation of BN_is_odd""" + for _ in range(1024): + obj = 
ECCBlind() + x = OpenSSL.BN_new() + y = OpenSSL.BN_new() + OpenSSL.EC_POINT_get_affine_coordinates( + obj.group, obj.Q, x, y, 0) + self.assertEqual(OpenSSL.BN_is_odd(y), + OpenSSL.BN_is_odd_compatible(y)) + + def test_serialize_ec_point(self): + """Test EC point serialization/deserialization""" + for _ in range(1024): + try: + obj = ECCBlind() + obj2 = ECCBlind() + randompoint = obj.Q + serialized = obj._ec_point_serialize(randompoint) + secondpoint = obj2._ec_point_deserialize(serialized) + x0 = OpenSSL.BN_new() + y0 = OpenSSL.BN_new() + OpenSSL.EC_POINT_get_affine_coordinates(obj.group, + randompoint, x0, + y0, obj.ctx) + x1 = OpenSSL.BN_new() + y1 = OpenSSL.BN_new() + OpenSSL.EC_POINT_get_affine_coordinates(obj2.group, + secondpoint, x1, + y1, obj2.ctx) + + self.assertEqual(OpenSSL.BN_cmp(y0, y1), 0) + self.assertEqual(OpenSSL.BN_cmp(x0, x1), 0) + self.assertEqual(OpenSSL.EC_POINT_cmp(obj.group, randompoint, + secondpoint, 0), 0) + finally: + OpenSSL.BN_free(x0) + OpenSSL.BN_free(x1) + OpenSSL.BN_free(y0) + OpenSSL.BN_free(y1) + del obj + del obj2 + + def test_serialize_bn(self): + """Test Bignum serialization/deserialization""" + for _ in range(1024): + obj = ECCBlind() + obj2 = ECCBlind() + randomnum = obj.d + serialized = obj._bn_serialize(randomnum) + secondnum = obj2._bn_deserialize(serialized) + self.assertEqual(OpenSSL.BN_cmp(randomnum, secondnum), 0) + + def test_blind_sig_many(self): + """Test a lot of blind signatures""" + for _ in range(1024): + self.test_blind_sig() + + def test_blind_sig_value(self): + """Test blind signature value checking""" + signer_obj = ECCBlind(value=5) + point_r = signer_obj.signer_init() + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + msg = os.urandom(64) + msg_blinded = requester_obj.create_signing_request(point_r, msg) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) + 
self.assertFalse(verifier_obj.verify(msg, signature, value=8)) + + def test_blind_sig_expiration(self): + """Test blind signature expiration checking""" + signer_obj = ECCBlind(year=2020, month=1) + point_r = signer_obj.signer_init() + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + msg = os.urandom(64) + msg_blinded = requester_obj.create_signing_request(point_r, msg) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) + self.assertFalse(verifier_obj.verify(msg, signature)) + + def test_blind_sig_chain(self): # pylint: disable=too-many-locals + """Test blind signature chain using a random certifier key and a random message""" + + test_levels = 4 + msg = os.urandom(1024) + + ca = ECCBlind() + signer_obj = ca + + output = bytearray() + + for level in range(test_levels): + if not level: + output.extend(ca.pubkey()) + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + child_obj = ECCBlind() + point_r = signer_obj.signer_init() + pubkey = child_obj.pubkey() + + if level == test_levels - 1: + msg_blinded = requester_obj.create_signing_request(point_r, + msg) + else: + msg_blinded = requester_obj.create_signing_request(point_r, + pubkey) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + if level != test_levels - 1: + output.extend(pubkey) + output.extend(signature) + signer_obj = child_obj + verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) + self.assertTrue(verifychain.verify(msg=msg, value=1)) + + def test_blind_sig_chain_wrong_ca(self): # pylint: disable=too-many-locals + """Test blind signature chain with an unlisted ca""" + + test_levels = 4 + msg = os.urandom(1024) + + ca = ECCBlind() + fake_ca = ECCBlind() + signer_obj = fake_ca + + output = bytearray() + + for level in range(test_levels): + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + child_obj = 
ECCBlind() + if not level: + # unlisted CA, but a syntactically valid pubkey + output.extend(fake_ca.pubkey()) + point_r = signer_obj.signer_init() + pubkey = child_obj.pubkey() + + if level == test_levels - 1: + msg_blinded = requester_obj.create_signing_request(point_r, + msg) + else: + msg_blinded = requester_obj.create_signing_request(point_r, + pubkey) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + if level != test_levels - 1: + output.extend(pubkey) + output.extend(signature) + signer_obj = child_obj + verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) + self.assertFalse(verifychain.verify(msg, 1)) + + def test_blind_sig_chain_wrong_msg(self): # pylint: disable=too-many-locals + """Test blind signature chain with a fake message""" + + test_levels = 4 + msg = os.urandom(1024) + fake_msg = os.urandom(1024) + + ca = ECCBlind() + signer_obj = ca + + output = bytearray() + + for level in range(test_levels): + if not level: + output.extend(ca.pubkey()) + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + child_obj = ECCBlind() + point_r = signer_obj.signer_init() + pubkey = child_obj.pubkey() + + if level == test_levels - 1: + msg_blinded = requester_obj.create_signing_request(point_r, + msg) + else: + msg_blinded = requester_obj.create_signing_request(point_r, + pubkey) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + if level != test_levels - 1: + output.extend(pubkey) + output.extend(signature) + signer_obj = child_obj + verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) + self.assertFalse(verifychain.verify(fake_msg, 1)) + + def test_blind_sig_chain_wrong_intermediary(self): # pylint: disable=too-many-locals + """Test blind signature chain using a fake intermediary pubkey""" + + test_levels = 4 + msg = os.urandom(1024) + wrong_level = 2 + + ca = ECCBlind() + signer_obj = ca + fake_intermediary = 
ECCBlind() + + output = bytearray() + + for level in range(test_levels): + if not level: + output.extend(ca.pubkey()) + requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) + child_obj = ECCBlind() + point_r = signer_obj.signer_init() + pubkey = child_obj.pubkey() + + if level == test_levels - 1: + msg_blinded = requester_obj.create_signing_request(point_r, + msg) + else: + msg_blinded = requester_obj.create_signing_request(point_r, + pubkey) + signature_blinded = signer_obj.blind_sign(msg_blinded) + signature = requester_obj.unblind(signature_blinded) + if level == wrong_level: + output.extend(fake_intermediary.pubkey()) + elif level != test_levels - 1: + output.extend(pubkey) + output.extend(signature) + signer_obj = child_obj + verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) + self.assertFalse(verifychain.verify(msg, 1)) diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py new file mode 100644 index 00000000..cb789277 --- /dev/null +++ b/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py @@ -0,0 +1,57 @@ +""" +Test if OpenSSL is working correctly +""" +import unittest + +try: + from pyelliptic.openssl import OpenSSL +except ImportError: + from pybitmessage.pyelliptic import OpenSSL + +try: + OpenSSL.BN_bn2binpad + have_pad = True +except AttributeError: + have_pad = None + + +class TestOpenSSL(unittest.TestCase): + """ + Test cases for OpenSSL + """ + def test_is_odd(self): + """Test BN_is_odd implementation""" + ctx = OpenSSL.BN_CTX_new() + a = OpenSSL.BN_new() + group = OpenSSL.EC_GROUP_new_by_curve_name( + OpenSSL.get_curve("secp256k1")) + OpenSSL.EC_GROUP_get_order(group, a, ctx) + + bad = 0 + for _ in range(1024): + OpenSSL.BN_rand(a, OpenSSL.BN_num_bits(a), 0, 0) + if not OpenSSL.BN_is_odd(a) == OpenSSL.BN_is_odd_compatible(a): + bad += 1 + self.assertEqual(bad, 0) + + @unittest.skipUnless(have_pad, 'Skipping OpenSSL pad test') + def 
test_padding(self): + """Test an alternative implementation of bn2binpad""" + + ctx = OpenSSL.BN_CTX_new() + a = OpenSSL.BN_new() + n = OpenSSL.BN_new() + group = OpenSSL.EC_GROUP_new_by_curve_name( + OpenSSL.get_curve("secp256k1")) + OpenSSL.EC_GROUP_get_order(group, n, ctx) + + bad = 0 + for _ in range(1024): + OpenSSL.BN_rand(a, OpenSSL.BN_num_bits(n), 0, 0) + b = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(n)) + c = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(a)) + OpenSSL.BN_bn2binpad(a, b, OpenSSL.BN_num_bytes(n)) + OpenSSL.BN_bn2bin(a, c) + if b.raw != c.raw.rjust(OpenSSL.BN_num_bytes(n), b'\x00'): + bad += 1 + self.assertEqual(bad, 0) diff --git a/src/tests/mock/pybitmessage/qidenticon.py b/src/tests/mock/pybitmessage/qidenticon.py new file mode 100644 index 00000000..30b61b9b --- /dev/null +++ b/src/tests/mock/pybitmessage/qidenticon.py @@ -0,0 +1,276 @@ +### +# qidenticon.py is Licesensed under FreeBSD License. +# (http://www.freebsd.org/copyright/freebsd-license.html) +# +# Copyright 1994-2009 Shin Adachi. All rights reserved. +# Copyright 2013 "Sendiulo". All rights reserved. +# Copyright 2018-2021 The Bitmessage Developers. All rights reserved. +# +# Redistribution and use in source and binary forms, +# with or without modification, are permitted provided that the following +# conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +### + +# pylint: disable=too-many-locals,too-many-arguments,too-many-function-args +""" +Usage +----- + +>>> import qidenticon +>>> qidenticon.render_identicon(code, size) + +Returns an instance of :class:`QPixmap` which have generated identicon image. +``size`` specifies `patch size`. Generated image size is 3 * ``size``. +""" + +from six.moves import range + +try: + from PyQt5 import QtCore, QtGui +except ImportError: + from PyQt4 import QtCore, QtGui + + +class IdenticonRendererBase(object): + """Encapsulate methods around rendering identicons""" + + PATH_SET = [] + + def __init__(self, code): + """ + :param code: code for icon + """ + if not isinstance(code, int): + code = int(code) + self.code = code + + def render(self, size, twoColor, opacity, penwidth): + """ + render identicon to QPixmap + + :param size: identicon patchsize. 
(image size is 3 * [size]) + :returns: :class:`QPixmap` + """ + + # decode the code + middle, corner, side, foreColor, secondColor, swap_cross = \ + self.decode(self.code, twoColor) + + # make image + image = QtGui.QPixmap( + QtCore.QSize(size * 3 + penwidth, size * 3 + penwidth)) + + # fill background + backColor = QtGui.QColor(255, 255, 255, opacity) + image.fill(backColor) + + kwds = { + 'image': image, + 'size': size, + 'foreColor': foreColor if swap_cross else secondColor, + 'penwidth': penwidth, + 'backColor': backColor} + + # middle patch + image = self.drawPatchQt( + (1, 1), middle[2], middle[1], middle[0], **kwds) + + # side patch + kwds['foreColor'] = foreColor + kwds['patch_type'] = side[0] + for i in range(4): + pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i] + image = self.drawPatchQt(pos, side[2] + 1 + i, side[1], **kwds) + + # corner patch + kwds['foreColor'] = secondColor + kwds['patch_type'] = corner[0] + for i in range(4): + pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i] + image = self.drawPatchQt(pos, corner[2] + 1 + i, corner[1], **kwds) + + return image + + def drawPatchQt( + self, pos, turn, invert, patch_type, image, size, foreColor, + backColor, penwidth): # pylint: disable=unused-argument + """ + :param size: patch size + """ + path = self.PATH_SET[patch_type] + if not path: + # blank patch + invert = not invert + path = [(0., 0.), (1., 0.), (1., 1.), (0., 1.), (0., 0.)] + + polygon = QtGui.QPolygonF([ + QtCore.QPointF(x * size, y * size) for x, y in path]) + + rot = turn % 4 + rect = [ + QtCore.QPointF(0., 0.), QtCore.QPointF(size, 0.), + QtCore.QPointF(size, size), QtCore.QPointF(0., size)] + rotation = [0, 90, 180, 270] + + nopen = QtGui.QPen(foreColor, QtCore.Qt.NoPen) + foreBrush = QtGui.QBrush(foreColor, QtCore.Qt.SolidPattern) + if penwidth > 0: + pen_color = QtGui.QColor(255, 255, 255) + pen = QtGui.QPen(pen_color, QtCore.Qt.SolidPattern) + pen.setWidth(penwidth) + + painter = QtGui.QPainter() + painter.begin(image) + painter.setPen(nopen) + + 
painter.translate( + pos[0] * size + penwidth / 2, pos[1] * size + penwidth / 2) + painter.translate(rect[rot]) + painter.rotate(rotation[rot]) + + if invert: + # subtract the actual polygon from a rectangle to invert it + poly_rect = QtGui.QPolygonF(rect) + polygon = poly_rect.subtracted(polygon) + painter.setBrush(foreBrush) + if penwidth > 0: + # draw the borders + painter.setPen(pen) + painter.drawPolygon(polygon, QtCore.Qt.WindingFill) + # draw the fill + painter.setPen(nopen) + painter.drawPolygon(polygon, QtCore.Qt.WindingFill) + + painter.end() + + return image + + def decode(self, code, twoColor): + """virtual functions""" + raise NotImplementedError + + +class DonRenderer(IdenticonRendererBase): + """ + Don Park's implementation of identicon, see: + https://blog.docuverse.com/2007/01/18/identicon-updated-and-source-released + """ + + PATH_SET = [ + # [0] full square: + [(0, 0), (4, 0), (4, 4), (0, 4)], + # [1] right-angled triangle pointing top-left: + [(0, 0), (4, 0), (0, 4)], + # [2] upwardy triangle: + [(2, 0), (4, 4), (0, 4)], + # [3] left half of square, standing rectangle: + [(0, 0), (2, 0), (2, 4), (0, 4)], + # [4] square standing on diagonale: + [(2, 0), (4, 2), (2, 4), (0, 2)], + # [5] kite pointing topleft: + [(0, 0), (4, 2), (4, 4), (2, 4)], + # [6] Sierpinski triangle, fractal triangles: + [(2, 0), (4, 4), (2, 4), (3, 2), (1, 2), (2, 4), (0, 4)], + # [7] sharp angled lefttop pointing triangle: + [(0, 0), (4, 2), (2, 4)], + # [8] small centered square: + [(1, 1), (3, 1), (3, 3), (1, 3)], + # [9] two small triangles: + [(2, 0), (4, 0), (0, 4), (0, 2), (2, 2)], + # [10] small topleft square: + [(0, 0), (2, 0), (2, 2), (0, 2)], + # [11] downpointing right-angled triangle on bottom: + [(0, 2), (4, 2), (2, 4)], + # [12] uppointing right-angled triangle on bottom: + [(2, 2), (4, 4), (0, 4)], + # [13] small rightbottom pointing right-angled triangle on topleft: + [(2, 0), (2, 2), (0, 2)], + # [14] small lefttop pointing right-angled triangle on 
    def decode(self, code, twoColor):
        """
        Decode an integer ``code`` into the identicon layout description.

        Consumes bit fields from the low end of ``code``:
        middle patch (2 bits type + 1 invert), corner patch
        (4 type + 1 invert + 2 turn), side patch (4 + 1 + 2),
        then two 15-bit colors (3 x 5-bit channels) and a swap flag.

        :param code: integer whose bits encode the icon
        :param twoColor: if False, secondColor is the same as foreColor
        :returns: tuple of (middle, corner, side) patch descriptors,
            foreColor, secondColor and the swap_cross flag
        """

        shift = 0
        middleType = (code >> shift) & 0x03
        shift += 2
        middleInvert = (code >> shift) & 0x01
        shift += 1
        cornerType = (code >> shift) & 0x0F
        shift += 4
        cornerInvert = (code >> shift) & 0x01
        shift += 1
        cornerTurn = (code >> shift) & 0x03
        shift += 2
        sideType = (code >> shift) & 0x0F
        shift += 4
        sideInvert = (code >> shift) & 0x01
        shift += 1
        sideTurn = (code >> shift) & 0x03
        shift += 2
        # foreground color: 5 bits per channel, read blue/green/red
        blue = (code >> shift) & 0x1F
        shift += 5
        green = (code >> shift) & 0x1F
        shift += 5
        red = (code >> shift) & 0x1F
        shift += 5
        second_blue = (code >> shift) & 0x1F
        shift += 5
        second_green = (code >> shift) & 0x1F
        shift += 5
        second_red = (code >> shift) & 0x1F
        # NOTE(review): only 1 bit advanced after a 5-bit field, so
        # swap_cross overlaps second_red's bits — looks like it should
        # be `shift += 5`; confirm against upstream qidenticon.py
        shift += 1
        swap_cross = (code >> shift) & 0x01

        # map the 2-bit middle type onto the restricted symmetric set
        middleType = self.MIDDLE_PATCH_SET[middleType]

        # channels are 5-bit, shifted left 3 to span 0-248
        foreColor = (red << 3, green << 3, blue << 3)
        foreColor = QtGui.QColor(*foreColor)

        if twoColor:
            # NOTE(review): channel order here (blue, green, red) is the
            # reverse of foreColor's (red, green, blue) — possibly an
            # upstream quirk; confirm before changing
            secondColor = (
                second_blue << 3, second_green << 3, second_red << 3)
            secondColor = QtGui.QColor(*secondColor)
        else:
            secondColor = foreColor

        return (middleType, middleInvert, 0),\
            (cornerType, cornerInvert, cornerTurn),\
            (sideType, sideInvert, sideTurn),\
            foreColor, secondColor, swap_cross
class ObjectProcessorQueue(queue.Queue):
    """Special queue class using lock for `.threads.objectProcessor`"""

    # cap, in bytes, on the total payload held in the queue
    maxSize = 32000000

    def __init__(self):
        # explicit base call (not super()) because six.moves.queue.Queue
        # is an old-style class on Python 2
        queue.Queue.__init__(self)
        # guards updates to the running byte count
        self.sizeLock = threading.Lock()
        #: in Bytes. We maintain this to prevent nodes from flooding us
        #: with objects which take up too much memory. If this gets
        #: too big we'll sleep before asking for further objects.
        self.curSize = 0

    def put(self, item, block=True, timeout=None):
        """
        Enqueue ``item`` (an ``(objectType, data)`` pair), sleeping in
        one-second steps while the tracked byte total is at the cap.
        """
        payload_size = len(item[1])
        while not self.curSize < self.maxSize:
            time.sleep(1)
        with self.sizeLock:
            self.curSize += payload_size
        queue.Queue.put(self, item, block, timeout)

    def get(self, block=True, timeout=None):
        """Dequeue an item and release its bytes from the running total."""
        retrieved = queue.Queue.get(self, block, timeout)
        with self.sizeLock:
            self.curSize -= len(retrieved[1])
        return retrieved
class RandomTrackingDict(object):
    """
    Dict with randomised order and tracking.

    Keeps a track of how many items have been requested from the dict,
    and timeouts. Resets after all objects have been retrieved and timed out.
    The main purpose of this isn't as much putting related code together
    as performance optimisation and anonymisation of downloading of objects
    from other peers. If done using a standard dict or array, it takes
    too much CPU (and looks convoluted). Randomisation helps with anonymity.

    Internal layout: ``dictionary`` maps key -> ``[index, value]`` where
    ``index`` is that key's position in ``indexDict``. The last
    ``pendingLen`` slots of ``indexDict`` form the "pending" region —
    keys already handed out by :meth:`randomKeys` and awaiting delivery.
    """
    # pylint: disable=too-many-instance-attributes
    maxPending = 10         # max keys handed out before a timeout is required
    pendingTimeout = 60     # seconds before handed-out keys may be re-issued

    def __init__(self):
        self.dictionary = {}    # key -> [index in indexDict, value]
        self.indexDict = []     # position -> key; tail is the pending region
        self.len = 0            # tracked length (avoids repeated len() calls)
        self.pendingLen = 0     # number of keys currently pending
        self.lastPoll = 0       # time of the last randomKeys() call
        self.lastObject = 0     # time the last object was received
        self.lock = RLock()     # reentrant: _swap is called under the lock

    def __len__(self):
        return self.len

    def __contains__(self, key):
        return key in self.dictionary

    def __getitem__(self, key):
        # [1] is the stored value; [0] is the bookkeeping index
        return self.dictionary[key][1]

    def _swap(self, i1, i2):
        """Swap the keys at positions i1 and i2 and fix their back-indices."""
        with self.lock:
            key1 = self.indexDict[i1]
            key2 = self.indexDict[i2]
            self.indexDict[i1] = key2
            self.indexDict[i2] = key1
            self.dictionary[key1][0] = i2
            self.dictionary[key2][0] = i1
            # for quick reassignment
            return i2

    def __setitem__(self, key, value):
        with self.lock:
            if key in self.dictionary:
                self.dictionary[key][1] = value
            else:
                # append at the end, then swap into the non-pending region
                # (just before the first pending slot)
                self.indexDict.append(key)
                self.dictionary[key] = [self.len, value]
                self._swap(self.len, self.len - self.pendingLen)
                self.len += 1

    def __delitem__(self, key):
        if key not in self.dictionary:
            raise KeyError
        with self.lock:
            index = self.dictionary[key][0]
            # not pending
            if index < self.len - self.pendingLen:
                # left of pending part: move to the edge of the
                # non-pending region first so the final swap is safe
                index = self._swap(index, self.len - self.pendingLen - 1)
            # pending
            else:
                self.pendingLen -= 1
            # end: move the doomed key to the very last slot, then pop it
            self._swap(index, self.len - 1)
            # if the following del is batched, performance of this single
            # operation can improve 4x, but it's already very fast so we'll
            # ignore it for the time being
            del self.indexDict[-1]
            del self.dictionary[key]
            self.len -= 1

    def setMaxPending(self, maxPending):
        """
        Sets maximum number of objects that can be retrieved from the class
        simultaneously as long as there is no timeout
        """
        self.maxPending = maxPending

    def setPendingTimeout(self, pendingTimeout):
        """Sets how long to wait for a timeout if max pending is reached
        (or all objects have been retrieved)"""
        self.pendingTimeout = pendingTimeout

    def setLastObject(self):
        """Update timestamp for tracking of received objects"""
        self.lastObject = time()

    def randomKeys(self, count=1):
        """Retrieve count random keys from the dict
        that haven't already been retrieved"""
        # refuse when empty, or when the pending region is saturated and
        # the timeout since the last poll has not yet elapsed
        if self.len == 0 or (
            (self.pendingLen >= self.maxPending
             or self.pendingLen == self.len)
                and self.lastPoll + self.pendingTimeout > time()):
            raise KeyError

        # pylint: disable=redefined-outer-name
        with self.lock:
            # reset if we've requested all
            # and if last object received too long time ago
            if self.pendingLen == self.len and self.lastObject + \
                    self.pendingTimeout < time():
                self.pendingLen = 0
                self.setLastObject()
            available = self.len - self.pendingLen
            if count > available:
                count = available
            randomIndex = helper_random.randomsample(
                range(self.len - self.pendingLen), count)
            retval = [self.indexDict[i] for i in randomIndex]

            # process indices high-to-low so earlier swaps don't
            # invalidate the remaining positions
            for i in sorted(randomIndex, reverse=True):
                # swap with one below lowest pending
                self._swap(i, self.len - self.pendingLen - 1)
                self.pendingLen += 1
            self.lastPoll = time()
            return retval
# At this point we should really just have a isAddressInMy(book, address)...
def isAddressInMySubscriptionsList(address):
    """Am I subscribed to this address?"""
    # any returned row means a subscription exists
    return sqlQuery(
        '''select * from subscriptions where address=?''',
        str(address)) != []
def decodeWalletImportFormat(WIFstring):
    # pylint: disable=inconsistent-return-statements
    """
    Convert private key from base58 that's used in the config file to
    8-bit binary string

    :param WIFstring: base58-encoded key with a 0x80 version prefix and
        a 4-byte double-SHA256 checksum suffix
    :returns: the raw key bytes (prefix and checksum stripped); on any
        failure the whole process is terminated via ``os._exit``
    """
    fullString = arithmetic.changebase(WIFstring, 58, 256)
    # last 4 bytes are the checksum over everything before them
    privkey = fullString[:-4]
    if fullString[-4:] != \
            hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        # deliberate hard exit: a corrupt key in keys.dat is unrecoverable
        logger.critical(
            'Major problem! When trying to decode one of your'
            ' private keys, the checksum failed. Here are the first'
            ' 6 characters of the PRIVATE key: %s',
            str(WIFstring)[:6]
        )
        os._exit(0)  # pylint: disable=protected-access
        # return ""
    # NOTE(review): comparing privkey[0] to the str '\x80' only works on
    # Python 2 (bytes are str); on Python 3 privkey[0] is an int (128),
    # so this branch would never match — confirm the intended interpreter
    elif privkey[0] == '\x80':  # checksum passed
        return privkey[1:]

    logger.critical(
        'Major problem! When trying to decode one of your private keys,'
        ' the checksum passed but the key doesn\'t begin with hex 80.'
        ' Here is the PRIVATE key: %s', WIFstring
    )
    os._exit(0)  # pylint: disable=protected-access
def fixPotentiallyInvalidUTF8Data(text):
    """Sanitise invalid UTF-8 strings"""
    # probe-decode only; the original bytes are returned untouched
    # when they are valid UTF-8
    try:
        text.decode('utf-8')
    except UnicodeDecodeError:
        return (
            'Part of the message is corrupt. The message cannot be'
            ' displayed the normal way.\n\n' + repr(text))
    return text
todo:: This might deserve extra checks by someone familiar with + # Windows systems. + return True + elif sys.platform[:7] == 'freebsd': + # FreeBSD file systems are the same as major Linux file systems + present_permissions = os.stat(filename)[0] + disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO + return present_permissions & disallowed_permissions == 0 + try: + # Skip known problems for non-Win32 filesystems + # without POSIX permissions. + fstype = subprocess.check_output( + 'stat -f -c "%%T" %s' % (filename), + shell=True, + stderr=subprocess.STDOUT + ) + if 'fuseblk' in fstype: + logger.info( + 'Skipping file permissions check for %s.' + ' Filesystem fuseblk detected.', filename) + return True + except: # noqa:E722 + # Swallow exception here, but we might run into trouble later! + logger.error('Could not determine filesystem type. %s', filename) + present_permissions = os.stat(filename)[0] + disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO + return present_permissions & disallowed_permissions == 0 + + +# Fixes permissions on a sensitive file. +def fixSensitiveFilePermissions(filename, hasEnabledKeys): + """Try to change file permissions to be more restrictive""" + if hasEnabledKeys: + logger.warning( + 'Keyfile had insecure permissions, and there were enabled' + ' keys. The truly paranoid should stop using them immediately.') + else: + logger.warning( + 'Keyfile had insecure permissions, but there were no enabled keys.' 
def doCleanShutdown():
    """
    Used to tell all the threads to finish work and exit.

    Sets ``state.shutdown``, stops every :class:`StoppableThread`, saves
    knownnodes and the inventory, waits for the objectProcessor and PoW
    workers, flushes the work queues, then either hard-exits (daemon) or
    returns to let the GUI finish.
    """
    state.shutdown = 1

    # wake the objectProcessor so it notices the shutdown flag
    objectProcessorQueue.put(('checkShutdownVariable', 'no data'))
    for thread in threading.enumerate():
        # is_alive() instead of isAlive(): the camelCase alias was
        # removed in Python 3.9 (is_alive exists on Python 2.6+ too)
        if thread.is_alive() and isinstance(thread, StoppableThread):
            thread.stopThread()

    UISignalQueue.put((
        'updateStatusBar',
        'Saving the knownNodes list of peers to disk...'))
    logger.info('Saving knownNodes list of peers to disk')
    saveKnownNodes()
    logger.info('Done saving knownNodes list of peers to disk')
    UISignalQueue.put((
        'updateStatusBar',
        'Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk.'
        ' This should normally only take a second...'))
    Inventory().flush()

    # Verify that the objectProcessor has finished exiting. It should have
    # incremented the shutdown variable from 1 to 2. This must finish before
    # we command the sqlThread to exit.
    while state.shutdown == 1:
        time.sleep(.1)

    # Wait long enough to guarantee that any running proof of work worker
    # threads will check the shutdown variable and exit. If the main thread
    # closes before they do then they won't stop.
    time.sleep(.25)

    for thread in threading.enumerate():
        if (
            # current_thread() instead of deprecated currentThread()
            thread is not threading.current_thread()
            and isinstance(thread, StoppableThread)
            and thread.name != 'SQL'
        ):
            logger.debug("Waiting for thread %s", thread.name)
            thread.join()

    # This one last useless query will guarantee that the previous flush
    # committed and that the
    # objectProcessorThread committed before we close the program.
    sqlQuery('SELECT address FROM subscriptions')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')

    # flush queues
    for q in (
            workerQueue, UISignalQueue, addressGeneratorQueue,
            objectProcessorQueue):
        while True:
            try:
                q.get(False)
                q.task_done()
            except queue.Empty:
                break

    if state.thisapp.daemon or not state.enableGUI:
        logger.info('Clean shutdown complete.')
        state.thisapp.cleanup()
        os._exit(0)  # pylint: disable=protected-access
    else:
        logger.info('Core shutdown complete.')
        for thread in threading.enumerate():
            logger.debug('Thread %s still running', thread.name)
creating a lock file + at appdata. + """ + def __init__(self, flavor_id="", daemon=False): + self.initialized = False + self.counter = 0 + self.daemon = daemon + self.lockPid = None + self.lockfile = os.path.normpath( + os.path.join(state.appdata, 'singleton%s.lock' % flavor_id)) + + if state.enableGUI and not self.daemon and not state.curses: + # Tells the already running (if any) application to get focus. + import bitmessageqt + bitmessageqt.init() + + self.lock() + + self.initialized = True + atexit.register(self.cleanup) + + def lock(self): + """Obtain single instance lock""" + if self.lockPid is None: + self.lockPid = os.getpid() + if sys.platform == 'win32': + try: + # file already exists, we try to remove + # (in case previous execution was interrupted) + if os.path.exists(self.lockfile): + os.unlink(self.lockfile) + self.fd = os.open( + self.lockfile, + os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC + ) + except OSError as e: + if e.errno == 13: + sys.exit( + 'Another instance of this application is' + ' already running') + raise + else: + pidLine = "%i\n" % self.lockPid + os.write(self.fd, pidLine) + else: # non Windows + self.fp = open(self.lockfile, 'a+') + try: + if self.daemon and self.lockPid != os.getpid(): + # wait for parent to finish + fcntl.lockf(self.fp, fcntl.LOCK_EX) + else: + fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB) + self.lockPid = os.getpid() + except IOError: + sys.exit( + 'Another instance of this application is' + ' already running') + else: + pidLine = "%i\n" % self.lockPid + self.fp.truncate(0) + self.fp.write(pidLine) + self.fp.flush() + + def cleanup(self): + """Release single instance lock""" + if not self.initialized: + return + if self.daemon and self.lockPid == os.getpid(): + # these are the two initial forks while daemonizing + try: + if sys.platform == 'win32': + if hasattr(self, 'fd'): + os.close(self.fd) + else: + fcntl.lockf(self.fp, fcntl.LOCK_UN) + except Exception: + pass + + return + + try: + if sys.platform 
def Singleton(cls):
    """
    Decorator implementing the singleton pattern:
    it restricts the instantiation of a class to one "single" instance.
    """
    _cache = {}

    # https://github.com/sphinx-doc/sphinx/issues/3783
    @wraps(cls)
    def getinstance():
        """Find an instance or save newly created one"""
        try:
            return _cache[cls]
        except KeyError:
            _cache[cls] = cls()
            return _cache[cls]

    return getinstance
+""" + +# Component control flags - set on startup, do not change during runtime +# The defaults are for standalone GUI (default operating mode) +enableNetwork = True +"""enable network threads""" +enableObjProc = True +"""enable object processing thread""" +enableAPI = True +"""enable API (if configured)""" +enableGUI = True +"""enable GUI (QT or ncurses)""" +enableSTDIO = False +"""enable STDIO threads""" +enableKivy = False +"""enable kivy app and test cases""" +curses = False + +maximumNumberOfHalfOpenConnections = 0 + +maximumLengthOfTimeToBotherResendingMessages = 0 + +invThread = None +addrThread = None +downloadThread = None +uploadThread = None + +ownAddresses = {} + +discoveredPeers = {} + +dandelion = 0 + +testmode = False + +clientHasReceivedIncomingConnections = False +"""used by API command clientStatus""" + +numberOfMessagesProcessed = 0 +numberOfBroadcastsProcessed = 0 +numberOfPubkeysProcessed = 0 + +statusIconColor = 'red' +""" +GUI status icon color +.. note:: bad style, refactor it +""" + +ackdataForWhichImWatching = {} + +thisapp = None +"""Singleton instance""" diff --git a/src/tests/mock/pybitmessage/testmode_init.py b/src/tests/mock/pybitmessage/testmode_init.py new file mode 100644 index 00000000..a088afc1 --- /dev/null +++ b/src/tests/mock/pybitmessage/testmode_init.py @@ -0,0 +1,40 @@ +import time +import uuid + +import helper_inbox +import helper_sql + +# from .tests.samples import sample_inbox_msg_ids, sample_deterministic_addr4 +sample_deterministic_addr4 = 'BM-2cWzSnwjJ7yRP3nLEWUV5LisTZyREWSzUK' +sample_inbox_msg_ids = ['27e644765a3e4b2e973ee7ccf958ea20', '51fc5531-3989-4d69-bbb5-68d64b756f5b', + '2c975c515f8b414db5eea60ba57ba455', 'bc1f2d8a-681c-4cc0-9a12-6067c7e1ac24'] + + +def populate_api_test_data(): + '''Adding test records in inbox table''' + helper_sql.sql_ready.wait() + + test1 = ( + sample_inbox_msg_ids[0], sample_deterministic_addr4, + sample_deterministic_addr4, 'Test1 subject', int(time.time()), + 'Test1 body', 'inbox', 
2, 0, uuid.uuid4().bytes + ) + test2 = ( + sample_inbox_msg_ids[1], sample_deterministic_addr4, + sample_deterministic_addr4, 'Test2 subject', int(time.time()), + 'Test2 body', 'inbox', 2, 0, uuid.uuid4().bytes + ) + test3 = ( + sample_inbox_msg_ids[2], sample_deterministic_addr4, + sample_deterministic_addr4, 'Test3 subject', int(time.time()), + 'Test3 body', 'inbox', 2, 0, uuid.uuid4().bytes + ) + test4 = ( + sample_inbox_msg_ids[3], sample_deterministic_addr4, + sample_deterministic_addr4, 'Test4 subject', int(time.time()), + 'Test4 body', 'inbox', 2, 0, uuid.uuid4().bytes + ) + helper_inbox.insert(test1) + helper_inbox.insert(test2) + helper_inbox.insert(test3) + helper_inbox.insert(test4) diff --git a/src/tests/mock/pybitmessage/threads.py b/src/tests/mock/pybitmessage/threads.py new file mode 100644 index 00000000..ac8bf7a6 --- /dev/null +++ b/src/tests/mock/pybitmessage/threads.py @@ -0,0 +1,48 @@ +""" +PyBitmessage does various tasks in separate threads. Most of them inherit +from `.network.StoppableThread`. There are `addressGenerator` for +addresses generation, `objectProcessor` for processing the network objects +passed minimal validation, `singleCleaner` to periodically clean various +internal storages (like inventory and knownnodes) and do forced garbage +collection, `singleWorker` for doing PoW, `sqlThread` for querying sqlite +database. + +There are also other threads in the `.network` package. 
+ +:func:`set_thread_name` is defined here for the threads that don't inherit from +:class:`.network.StoppableThread` +""" + +import threading + +from class_addressGenerator import addressGenerator +from class_objectProcessor import objectProcessor +from class_singleCleaner import singleCleaner +from class_singleWorker import singleWorker +from class_sqlThread import sqlThread + +try: + import prctl +except ImportError: + def set_thread_name(name): + """Set a name for the thread for python internal use.""" + threading.current_thread().name = name +else: + def set_thread_name(name): + """Set the thread name for external use (visible from the OS).""" + prctl.set_name(name) + + def _thread_name_hack(self): + set_thread_name(self.name) + threading.Thread.__bootstrap_original__(self) + # pylint: disable=protected-access + threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap + threading.Thread._Thread__bootstrap = _thread_name_hack + + +printLock = threading.Lock() + +__all__ = [ + "addressGenerator", "objectProcessor", "singleCleaner", "singleWorker", + "sqlThread", "printLock" +] diff --git a/src/tests/mock/pybitmessage/tr.py b/src/tests/mock/pybitmessage/tr.py new file mode 100644 index 00000000..eec82c37 --- /dev/null +++ b/src/tests/mock/pybitmessage/tr.py @@ -0,0 +1,59 @@ +""" +Translating text +""" +import os + +try: + import state +except ImportError: + from . import state + + +class translateClass: + """ + This is used so that the translateText function can be used + when we are in daemon mode and not using any QT functions. + """ + # pylint: disable=old-style-class,too-few-public-methods + def __init__(self, context, text): + self.context = context + self.text = text + + def arg(self, _): + """Replace argument placeholders""" + if '%' in self.text: + # This doesn't actually do anything with the arguments + # because we don't have a UI in which to display this information anyway. 
+ return translateClass(self.context, self.text.replace('%', '', 1)) + return self.text + + +def _translate(context, text, disambiguation=None, encoding=None, n=None): + # pylint: disable=unused-argument + return translateText(context, text, n) + + +def translateText(context, text, n=None): + """Translate text in context""" + try: + enableGUI = state.enableGUI + except AttributeError: # inside the plugin + enableGUI = True + if enableGUI: + try: + from PyQt4 import QtCore, QtGui + except Exception as err: + print('PyBitmessage requires PyQt unless you want to run it as a daemon' + ' and interact with it using the API.' + ' You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download' + ' or by searching Google for \'PyQt Download\'.' + ' If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon') + print('Error message:', err) + os._exit(0) # pylint: disable=protected-access + if n is None: + return QtGui.QApplication.translate(context, text) + return QtGui.QApplication.translate(context, text, None, QtCore.QCoreApplication.CodecForTr, n) + else: + if '%' in text: + return translateClass(context, text.replace('%', '', 1)) + return text diff --git a/src/tests/mock/pybitmessage/upnp.py b/src/tests/mock/pybitmessage/upnp.py new file mode 100644 index 00000000..c6db487b --- /dev/null +++ b/src/tests/mock/pybitmessage/upnp.py @@ -0,0 +1,348 @@ +# pylint: disable=too-many-statements,too-many-branches,protected-access,no-self-use +""" +Complete UPnP port forwarding implementation in separate thread. 
+Reference: http://mattscodecave.com/posts/using-python-and-upnp-to-forward-a-port +""" + +import httplib +import socket +import time +import urllib2 +from random import randint +from urlparse import urlparse +from xml.dom.minidom import Document, parseString + +import queues +import state +import tr +from bmconfigparser import BMConfigParser +from debug import logger +from network import BMConnectionPool, knownnodes, StoppableThread +from network.node import Peer + + +def createRequestXML(service, action, arguments=None): + """Router UPnP requests are XML formatted""" + + doc = Document() + + # create the envelope element and set its attributes + envelope = doc.createElementNS('', 's:Envelope') + envelope.setAttribute('xmlns:s', 'http://schemas.xmlsoap.org/soap/envelope/') + envelope.setAttribute('s:encodingStyle', 'http://schemas.xmlsoap.org/soap/encoding/') + + # create the body element + body = doc.createElementNS('', 's:Body') + + # create the function element and set its attribute + fn = doc.createElementNS('', 'u:%s' % action) + fn.setAttribute('xmlns:u', 'urn:schemas-upnp-org:service:%s' % service) + + # setup the argument element names and values + # using a list of tuples to preserve order + + # container for created nodes + argument_list = [] + + # iterate over arguments, create nodes, create text nodes, + # append text nodes to nodes, and finally add the ready product + # to argument_list + if arguments is not None: + for k, v in arguments: + tmp_node = doc.createElement(k) + tmp_text_node = doc.createTextNode(v) + tmp_node.appendChild(tmp_text_node) + argument_list.append(tmp_node) + + # append the prepared argument nodes to the function element + for arg in argument_list: + fn.appendChild(arg) + + # append function element to the body element + body.appendChild(fn) + + # append body element to envelope element + envelope.appendChild(body) + + # append envelope element to document, making it the root element + doc.appendChild(envelope) + + # our tree 
is ready, conver it to a string + return doc.toxml() + + +class UPnPError(Exception): + """Handle a UPnP error""" + + def __init__(self, message): + super(UPnPError, self).__init__() + logger.error(message) + + +class Router: # pylint: disable=old-style-class + """Encapulate routing""" + name = "" + path = "" + address = None + routerPath = None + extPort = None + + def __init__(self, ssdpResponse, address): + + self.address = address + + row = ssdpResponse.split('\r\n') + header = {} + for i in range(1, len(row)): + part = row[i].split(': ') + if len(part) == 2: + header[part[0].lower()] = part[1] + + try: + self.routerPath = urlparse(header['location']) + if not self.routerPath or not hasattr(self.routerPath, "hostname"): + logger.error("UPnP: no hostname: %s", header['location']) + except KeyError: + logger.error("UPnP: missing location header") + + # get the profile xml file and read it into a variable + directory = urllib2.urlopen(header['location']).read() + + # create a DOM object that represents the `directory` document + dom = parseString(directory) + + self.name = dom.getElementsByTagName('friendlyName')[0].childNodes[0].data + # find all 'serviceType' elements + service_types = dom.getElementsByTagName('serviceType') + + for service in service_types: + if service.childNodes[0].data.find('WANIPConnection') > 0 or \ + service.childNodes[0].data.find('WANPPPConnection') > 0: + self.path = service.parentNode.getElementsByTagName('controlURL')[0].childNodes[0].data + self.upnp_schema = service.childNodes[0].data.split(':')[-2] + + def AddPortMapping( + self, + externalPort, + internalPort, + internalClient, + protocol, + description, + leaseDuration=0, + enabled=1, + ): # pylint: disable=too-many-arguments + """Add UPnP port mapping""" + + resp = self.soapRequest(self.upnp_schema + ':1', 'AddPortMapping', [ + ('NewRemoteHost', ''), + ('NewExternalPort', str(externalPort)), + ('NewProtocol', protocol), + ('NewInternalPort', str(internalPort)), + 
('NewInternalClient', internalClient), + ('NewEnabled', str(enabled)), + ('NewPortMappingDescription', str(description)), + ('NewLeaseDuration', str(leaseDuration)) + ]) + self.extPort = externalPort + logger.info("Successfully established UPnP mapping for %s:%i on external port %i", + internalClient, internalPort, externalPort) + return resp + + def DeletePortMapping(self, externalPort, protocol): + """Delete UPnP port mapping""" + + resp = self.soapRequest(self.upnp_schema + ':1', 'DeletePortMapping', [ + ('NewRemoteHost', ''), + ('NewExternalPort', str(externalPort)), + ('NewProtocol', protocol), + ]) + logger.info("Removed UPnP mapping on external port %i", externalPort) + return resp + + def GetExternalIPAddress(self): + """Get the external address""" + + resp = self.soapRequest( + self.upnp_schema + ':1', 'GetExternalIPAddress') + dom = parseString(resp.read()) + return dom.getElementsByTagName( + 'NewExternalIPAddress')[0].childNodes[0].data + + def soapRequest(self, service, action, arguments=None): + """Make a request to a router""" + + conn = httplib.HTTPConnection(self.routerPath.hostname, self.routerPath.port) + conn.request( + 'POST', + self.path, + createRequestXML(service, action, arguments), + { + 'SOAPAction': '"urn:schemas-upnp-org:service:%s#%s"' % (service, action), + 'Content-Type': 'text/xml' + } + ) + resp = conn.getresponse() + conn.close() + if resp.status == 500: + respData = resp.read() + try: + dom = parseString(respData) + errinfo = dom.getElementsByTagName('errorDescription') + if errinfo: + logger.error("UPnP error: %s", respData) + raise UPnPError(errinfo[0].childNodes[0].data) + except: # noqa:E722 + raise UPnPError("Unable to parse SOAP error: %s" % (respData)) + return resp + + +class uPnPThread(StoppableThread): + """Start a thread to handle UPnP activity""" + + SSDP_ADDR = "239.255.255.250" + GOOGLE_DNS = "8.8.8.8" + SSDP_PORT = 1900 + SSDP_MX = 2 + SSDP_ST = "urn:schemas-upnp-org:device:InternetGatewayDevice:1" + + def 
__init__(self): + super(uPnPThread, self).__init__(name="uPnPThread") + self.extPort = BMConfigParser().safeGetInt('bitmessagesettings', 'extport', default=None) + self.localIP = self.getLocalIP() + self.routers = [] + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.sock.bind((self.localIP, 0)) + self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2) + self.sock.settimeout(5) + self.sendSleep = 60 + + def run(self): + """Start the thread to manage UPnP activity""" + + logger.debug("Starting UPnP thread") + logger.debug("Local IP: %s", self.localIP) + lastSent = 0 + + # wait until asyncore binds so that we know the listening port + bound = False + while state.shutdown == 0 and not self._stopped and not bound: + for s in BMConnectionPool().listeningSockets.values(): + if s.is_bound(): + bound = True + if not bound: + time.sleep(1) + + # pylint: disable=attribute-defined-outside-init + self.localPort = BMConfigParser().getint('bitmessagesettings', 'port') + + while state.shutdown == 0 and BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp'): + if time.time() - lastSent > self.sendSleep and not self.routers: + try: + self.sendSearchRouter() + except: # noqa:E722 + pass + lastSent = time.time() + try: + while state.shutdown == 0 and BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp'): + resp, (ip, _) = self.sock.recvfrom(1000) + if resp is None: + continue + newRouter = Router(resp, ip) + for router in self.routers: + if router.routerPath == newRouter.routerPath: + break + else: + logger.debug("Found UPnP router at %s", ip) + self.routers.append(newRouter) + self.createPortMapping(newRouter) + try: + self_peer = Peer( + newRouter.GetExternalIPAddress(), + self.extPort + ) + except: # noqa:E722 + logger.debug('Failed to get external IP') + else: + with knownnodes.knownNodesLock: + knownnodes.addKnownNode( + 1, self_peer, is_self=True) + queues.UISignalQueue.put(('updateStatusBar', tr._translate( + "MainWindow", 'UPnP 
port mapping established on port %1' + ).arg(str(self.extPort)))) + break + except socket.timeout: + pass + except: # noqa:E722 + logger.error("Failure running UPnP router search.", exc_info=True) + for router in self.routers: + if router.extPort is None: + self.createPortMapping(router) + try: + self.sock.shutdown(socket.SHUT_RDWR) + except: # noqa:E722 + pass + try: + self.sock.close() + except: # noqa:E722 + pass + deleted = False + for router in self.routers: + if router.extPort is not None: + deleted = True + self.deletePortMapping(router) + if deleted: + queues.UISignalQueue.put(('updateStatusBar', tr._translate("MainWindow", 'UPnP port mapping removed'))) + logger.debug("UPnP thread done") + + def getLocalIP(self): + """Get the local IP of the node""" + + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + s.connect((uPnPThread.GOOGLE_DNS, 1)) + return s.getsockname()[0] + + def sendSearchRouter(self): + """Querying for UPnP services""" + + ssdpRequest = "M-SEARCH * HTTP/1.1\r\n" + \ + "HOST: %s:%d\r\n" % (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT) + \ + "MAN: \"ssdp:discover\"\r\n" + \ + "MX: %d\r\n" % (uPnPThread.SSDP_MX, ) + \ + "ST: %s\r\n" % (uPnPThread.SSDP_ST, ) + "\r\n" + + try: + logger.debug("Sending UPnP query") + self.sock.sendto(ssdpRequest, (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT)) + except: # noqa:E722 + logger.exception("UPnP send query failed") + + def createPortMapping(self, router): + """Add a port mapping""" + + for i in range(50): + try: + localIP = self.localIP + if i == 0: + extPort = self.localPort # try same port first + elif i == 1 and self.extPort: + extPort = self.extPort # try external port from last time next + else: + extPort = randint(32767, 65535) + logger.debug( + "Attempt %i, requesting UPnP mapping for %s:%i on external port %i", + i, + localIP, + self.localPort, + extPort) + router.AddPortMapping(extPort, self.localPort, localIP, 'TCP', 'BitMessage') + 
self.extPort = extPort + BMConfigParser().set('bitmessagesettings', 'extport', str(extPort)) + BMConfigParser().save() + break + except UPnPError: + logger.debug("UPnP error: ", exc_info=True) + + def deletePortMapping(self, router): + """Delete a port mapping""" + router.DeletePortMapping(router.extPort, 'TCP') diff --git a/src/tests/mock/pybitmessage/version.py b/src/tests/mock/pybitmessage/version.py new file mode 100644 index 00000000..076b8c56 --- /dev/null +++ b/src/tests/mock/pybitmessage/version.py @@ -0,0 +1,2 @@ +softwareName = 'PyBitmessage' +softwareVersion = '0.6.3.2' -- 2.47.2 From 99eaa599775a72d6b9a3e6d3351c45f5513a43dc Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Fri, 17 Dec 2021 17:08:31 +0530 Subject: [PATCH 07/10] Fixed imports & ran bitmessagemock without error --- src/tests/mock/bitmessagemock.py | 2 +- src/tests/mock/pybitmessage/addresses.py | 283 --- src/tests/mock/pybitmessage/api.py | 1537 -------------- src/tests/mock/pybitmessage/bitmessagecli.py | 1887 ----------------- src/tests/mock/pybitmessage/bitmessagemain.py | 431 ---- src/tests/mock/pybitmessage/build_osx.py | 38 - .../pybitmessage/class_addressGenerator.py | 24 +- .../pybitmessage/class_objectProcessor.py | 25 +- .../mock/pybitmessage/class_singleCleaner.py | 187 -- .../mock/pybitmessage/class_singleWorker.py | 12 +- .../mock/pybitmessage/class_smtpDeliver.py | 117 - .../mock/pybitmessage/class_smtpServer.py | 217 -- .../mock/pybitmessage/class_sqlThread.py | 639 ------ src/tests/mock/pybitmessage/debug.py | 157 -- src/tests/mock/pybitmessage/defaults.py | 24 - src/tests/mock/pybitmessage/depends.py | 450 ---- .../mock/pybitmessage/fallback/__init__.py | 32 - .../fallback/umsgpack/__init__.py | 0 .../fallback/umsgpack/umsgpack.py | 1067 ---------- .../mock/pybitmessage/helper_ackPayload.py | 51 - .../mock/pybitmessage/helper_addressbook.py | 14 - src/tests/mock/pybitmessage/helper_bitcoin.py | 56 - src/tests/mock/pybitmessage/helper_inbox.py | 30 - 
.../mock/pybitmessage/helper_msgcoding.py | 159 -- src/tests/mock/pybitmessage/helper_random.py | 74 - src/tests/mock/pybitmessage/helper_search.py | 113 - src/tests/mock/pybitmessage/helper_sent.py | 48 - src/tests/mock/pybitmessage/helper_sql.py | 151 -- src/tests/mock/pybitmessage/helper_startup.py | 392 ---- .../mock/pybitmessage/highlevelcrypto.py | 146 -- src/tests/mock/pybitmessage/l10n.py | 152 -- src/tests/mock/pybitmessage/main.py | 13 - src/tests/mock/pybitmessage/multiqueue.py | 54 - src/tests/mock/pybitmessage/namecoin.py | 374 ---- .../mock/pybitmessage/network/__init__.py | 20 - .../mock/pybitmessage/network/addrthread.py | 49 - .../network/advanceddispatcher.py | 173 -- .../pybitmessage/network/announcethread.py | 43 - .../mock/pybitmessage/network/assemble.py | 31 - .../network/asyncore_pollchoose.py | 1012 --------- .../mock/pybitmessage/network/bmobject.py | 164 -- .../mock/pybitmessage/network/bmproto.py | 709 ------- .../pybitmessage/network/connectionchooser.py | 77 - .../pybitmessage/network/connectionpool.py | 405 ---- .../mock/pybitmessage/network/constants.py | 17 - .../mock/pybitmessage/network/dandelion.py | 196 -- .../pybitmessage/network/downloadthread.py | 84 - src/tests/mock/pybitmessage/network/http.py | 89 - src/tests/mock/pybitmessage/network/httpd.py | 161 -- src/tests/mock/pybitmessage/network/https.py | 71 - .../mock/pybitmessage/network/invthread.py | 111 - .../mock/pybitmessage/network/knownnodes.py | 269 --- .../pybitmessage/network/networkthread.py | 42 - src/tests/mock/pybitmessage/network/node.py | 7 - .../mock/pybitmessage/network/objectracker.py | 136 -- src/tests/mock/pybitmessage/network/proxy.py | 148 -- .../network/receivequeuethread.py | 56 - .../mock/pybitmessage/network/socks4a.py | 147 -- src/tests/mock/pybitmessage/network/socks5.py | 224 -- src/tests/mock/pybitmessage/network/stats.py | 78 - src/tests/mock/pybitmessage/network/tcp.py | 448 ---- .../mock/pybitmessage/network/threads.py | 49 - 
src/tests/mock/pybitmessage/network/tls.py | 220 -- src/tests/mock/pybitmessage/network/udp.py | 147 -- .../mock/pybitmessage/network/uploadthread.py | 69 - src/tests/mock/pybitmessage/openclpow.py | 111 - src/tests/mock/pybitmessage/openssl.py | 803 ------- src/tests/mock/pybitmessage/pathmagic.py | 10 - src/tests/mock/pybitmessage/paths.py | 131 -- src/tests/mock/pybitmessage/proofofwork.py | 394 ---- src/tests/mock/pybitmessage/protocol.py | 524 ----- src/tests/mock/pybitmessage/pybitmessage | 11 - .../mock/pybitmessage/pyelliptic/__init__.py | 30 - .../pybitmessage/pyelliptic/arithmetic.py | 166 -- .../mock/pybitmessage/pyelliptic/cipher.py | 90 - src/tests/mock/pybitmessage/pyelliptic/ecc.py | 501 ----- .../mock/pybitmessage/pyelliptic/eccblind.py | 373 ---- .../pybitmessage/pyelliptic/eccblindchain.py | 52 - .../mock/pybitmessage/pyelliptic/hash.py | 70 - .../mock/pybitmessage/pyelliptic/openssl.py | 803 ------- .../pybitmessage/pyelliptic/tests/__init__.py | 0 .../pyelliptic/tests/test_arithmetic.py | 84 - .../pyelliptic/tests/test_blindsig.py | 277 --- .../pyelliptic/tests/test_openssl.py | 57 - src/tests/mock/pybitmessage/qidenticon.py | 276 --- src/tests/mock/pybitmessage/queues.py | 12 +- .../mock/pybitmessage/randomtrackingdict.py | 132 -- src/tests/mock/pybitmessage/shared.py | 255 --- src/tests/mock/pybitmessage/shutdown.py | 91 - src/tests/mock/pybitmessage/singleinstance.py | 111 - src/tests/mock/pybitmessage/testmode_init.py | 40 - src/tests/mock/pybitmessage/threads.py | 67 +- src/tests/mock/pybitmessage/tr.py | 59 - src/tests/mock/pybitmessage/upnp.py | 348 --- src/tests/mock/pybitmessage/version.py | 2 - 95 files changed, 54 insertions(+), 20234 deletions(-) delete mode 100644 src/tests/mock/pybitmessage/addresses.py delete mode 100644 src/tests/mock/pybitmessage/api.py delete mode 100644 src/tests/mock/pybitmessage/bitmessagecli.py delete mode 100755 src/tests/mock/pybitmessage/bitmessagemain.py delete mode 100644 
src/tests/mock/pybitmessage/build_osx.py delete mode 100644 src/tests/mock/pybitmessage/class_singleCleaner.py delete mode 100644 src/tests/mock/pybitmessage/class_smtpDeliver.py delete mode 100644 src/tests/mock/pybitmessage/class_smtpServer.py delete mode 100644 src/tests/mock/pybitmessage/class_sqlThread.py delete mode 100644 src/tests/mock/pybitmessage/debug.py delete mode 100644 src/tests/mock/pybitmessage/defaults.py delete mode 100755 src/tests/mock/pybitmessage/depends.py delete mode 100644 src/tests/mock/pybitmessage/fallback/__init__.py delete mode 100644 src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py delete mode 100644 src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py delete mode 100644 src/tests/mock/pybitmessage/helper_ackPayload.py delete mode 100644 src/tests/mock/pybitmessage/helper_addressbook.py delete mode 100644 src/tests/mock/pybitmessage/helper_bitcoin.py delete mode 100644 src/tests/mock/pybitmessage/helper_inbox.py delete mode 100644 src/tests/mock/pybitmessage/helper_msgcoding.py delete mode 100644 src/tests/mock/pybitmessage/helper_random.py delete mode 100644 src/tests/mock/pybitmessage/helper_search.py delete mode 100644 src/tests/mock/pybitmessage/helper_sent.py delete mode 100644 src/tests/mock/pybitmessage/helper_sql.py delete mode 100644 src/tests/mock/pybitmessage/helper_startup.py delete mode 100644 src/tests/mock/pybitmessage/highlevelcrypto.py delete mode 100644 src/tests/mock/pybitmessage/l10n.py delete mode 100644 src/tests/mock/pybitmessage/main.py delete mode 100644 src/tests/mock/pybitmessage/multiqueue.py delete mode 100644 src/tests/mock/pybitmessage/namecoin.py delete mode 100644 src/tests/mock/pybitmessage/network/__init__.py delete mode 100644 src/tests/mock/pybitmessage/network/addrthread.py delete mode 100644 src/tests/mock/pybitmessage/network/advanceddispatcher.py delete mode 100644 src/tests/mock/pybitmessage/network/announcethread.py delete mode 100644 
src/tests/mock/pybitmessage/network/assemble.py delete mode 100644 src/tests/mock/pybitmessage/network/asyncore_pollchoose.py delete mode 100644 src/tests/mock/pybitmessage/network/bmobject.py delete mode 100644 src/tests/mock/pybitmessage/network/bmproto.py delete mode 100644 src/tests/mock/pybitmessage/network/connectionchooser.py delete mode 100644 src/tests/mock/pybitmessage/network/connectionpool.py delete mode 100644 src/tests/mock/pybitmessage/network/constants.py delete mode 100644 src/tests/mock/pybitmessage/network/dandelion.py delete mode 100644 src/tests/mock/pybitmessage/network/downloadthread.py delete mode 100644 src/tests/mock/pybitmessage/network/http.py delete mode 100644 src/tests/mock/pybitmessage/network/httpd.py delete mode 100644 src/tests/mock/pybitmessage/network/https.py delete mode 100644 src/tests/mock/pybitmessage/network/invthread.py delete mode 100644 src/tests/mock/pybitmessage/network/knownnodes.py delete mode 100644 src/tests/mock/pybitmessage/network/networkthread.py delete mode 100644 src/tests/mock/pybitmessage/network/node.py delete mode 100644 src/tests/mock/pybitmessage/network/objectracker.py delete mode 100644 src/tests/mock/pybitmessage/network/proxy.py delete mode 100644 src/tests/mock/pybitmessage/network/receivequeuethread.py delete mode 100644 src/tests/mock/pybitmessage/network/socks4a.py delete mode 100644 src/tests/mock/pybitmessage/network/socks5.py delete mode 100644 src/tests/mock/pybitmessage/network/stats.py delete mode 100644 src/tests/mock/pybitmessage/network/tcp.py delete mode 100644 src/tests/mock/pybitmessage/network/threads.py delete mode 100644 src/tests/mock/pybitmessage/network/tls.py delete mode 100644 src/tests/mock/pybitmessage/network/udp.py delete mode 100644 src/tests/mock/pybitmessage/network/uploadthread.py delete mode 100644 src/tests/mock/pybitmessage/openclpow.py delete mode 100644 src/tests/mock/pybitmessage/openssl.py delete mode 100644 src/tests/mock/pybitmessage/pathmagic.py delete mode 
100644 src/tests/mock/pybitmessage/paths.py delete mode 100644 src/tests/mock/pybitmessage/proofofwork.py delete mode 100644 src/tests/mock/pybitmessage/protocol.py delete mode 100644 src/tests/mock/pybitmessage/pybitmessage delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/__init__.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/arithmetic.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/cipher.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/ecc.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/eccblind.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/hash.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/openssl.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py delete mode 100644 src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py delete mode 100644 src/tests/mock/pybitmessage/qidenticon.py delete mode 100644 src/tests/mock/pybitmessage/randomtrackingdict.py delete mode 100644 src/tests/mock/pybitmessage/shared.py delete mode 100644 src/tests/mock/pybitmessage/shutdown.py delete mode 100644 src/tests/mock/pybitmessage/singleinstance.py delete mode 100644 src/tests/mock/pybitmessage/testmode_init.py delete mode 100644 src/tests/mock/pybitmessage/tr.py delete mode 100644 src/tests/mock/pybitmessage/upnp.py delete mode 100644 src/tests/mock/pybitmessage/version.py diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index e309dc17..397e77f8 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -2,6 +2,7 @@ from pybitmessage.class_addressGenerator import addressGenerator from pybitmessage.class_singleWorker import singleWorker from 
pybitmessage.class_objectProcessor import objectProcessor from pybitmessage.inventory import Inventory +from pybitmessage.bmconfigparser import BMConfigParser class MockMain(): @@ -10,7 +11,6 @@ class MockMain(): def start(self): """Start main application""" # pylint: disable=too-many-statements,too-many-branches,too-many-locals - config = BMConfigParser() daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') diff --git a/src/tests/mock/pybitmessage/addresses.py b/src/tests/mock/pybitmessage/addresses.py deleted file mode 100644 index e48873a1..00000000 --- a/src/tests/mock/pybitmessage/addresses.py +++ /dev/null @@ -1,283 +0,0 @@ -""" -Operations with addresses -""" -# pylint: disable=inconsistent-return-statements -import hashlib -import logging -from binascii import hexlify, unhexlify -from struct import pack, unpack - - -logger = logging.getLogger('default') - -ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" - - -def encodeBase58(num): - """Encode a number in Base X - - Args: - num: The number to encode - alphabet: The alphabet to use for encoding - """ - if num < 0: - return None - if num == 0: - return ALPHABET[0] - arr = [] - base = len(ALPHABET) - while num: - num, rem = divmod(num, base) - arr.append(ALPHABET[rem]) - arr.reverse() - return ''.join(arr) - - -def decodeBase58(string): - """Decode a Base X encoded string into the number - - Args: - string: The encoded string - alphabet: The alphabet to use for encoding - """ - base = len(ALPHABET) - num = 0 - - try: - for char in string: - num *= base - num += ALPHABET.index(char) - except ValueError: - # character not found (like a space character or a 0) - return 0 - return num - - -class varintEncodeError(Exception): - """Exception class for encoding varint""" - pass - - -class varintDecodeError(Exception): - """Exception class for decoding varint data""" - pass - - -def encodeVarint(integer): - """Convert integer into varint bytes""" - if integer < 0: - raise 
varintEncodeError('varint cannot be < 0') - if integer < 253: - return pack('>B', integer) - if integer >= 253 and integer < 65536: - return pack('>B', 253) + pack('>H', integer) - if integer >= 65536 and integer < 4294967296: - return pack('>B', 254) + pack('>I', integer) - if integer >= 4294967296 and integer < 18446744073709551616: - return pack('>B', 255) + pack('>Q', integer) - if integer >= 18446744073709551616: - raise varintEncodeError('varint cannot be >= 18446744073709551616') - - -def decodeVarint(data): - """ - Decodes an encoded varint to an integer and returns it. - Per protocol v3, the encoded value must be encoded with - the minimum amount of data possible or else it is malformed. - Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes) - """ - - if not data: - return (0, 0) - firstByte, = unpack('>B', data[0:1]) - if firstByte < 253: - # encodes 0 to 252 - return (firstByte, 1) # the 1 is the length of the varint - if firstByte == 253: - # encodes 253 to 65535 - if len(data) < 3: - raise varintDecodeError( - 'The first byte of this varint as an integer is %s' - ' but the total length is only %s. It needs to be' - ' at least 3.' % (firstByte, len(data))) - encodedValue, = unpack('>H', data[1:3]) - if encodedValue < 253: - raise varintDecodeError( - 'This varint does not encode the value with the lowest' - ' possible number of bytes.') - return (encodedValue, 3) - if firstByte == 254: - # encodes 65536 to 4294967295 - if len(data) < 5: - raise varintDecodeError( - 'The first byte of this varint as an integer is %s' - ' but the total length is only %s. It needs to be' - ' at least 5.' 
% (firstByte, len(data))) - encodedValue, = unpack('>I', data[1:5]) - if encodedValue < 65536: - raise varintDecodeError( - 'This varint does not encode the value with the lowest' - ' possible number of bytes.') - return (encodedValue, 5) - if firstByte == 255: - # encodes 4294967296 to 18446744073709551615 - if len(data) < 9: - raise varintDecodeError( - 'The first byte of this varint as an integer is %s' - ' but the total length is only %s. It needs to be' - ' at least 9.' % (firstByte, len(data))) - encodedValue, = unpack('>Q', data[1:9]) - if encodedValue < 4294967296: - raise varintDecodeError( - 'This varint does not encode the value with the lowest' - ' possible number of bytes.') - return (encodedValue, 9) - - -def calculateInventoryHash(data): - """Calculate inventory hash from object data""" - sha = hashlib.new('sha512') - sha2 = hashlib.new('sha512') - sha.update(data) - sha2.update(sha.digest()) - return sha2.digest()[0:32] - - -def encodeAddress(version, stream, ripe): - """Convert ripe to address""" - if version >= 2 and version < 4: - if len(ripe) != 20: - raise Exception( - 'Programming error in encodeAddress: The length of' - ' a given ripe hash was not 20.' - ) - - if ripe[:2] == b'\x00\x00': - ripe = ripe[2:] - elif ripe[:1] == b'\x00': - ripe = ripe[1:] - elif version == 4: - if len(ripe) != 20: - raise Exception( - 'Programming error in encodeAddress: The length of' - ' a given ripe hash was not 20.') - ripe = ripe.lstrip(b'\x00') - - storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe - - # Generate the checksum - sha = hashlib.new('sha512') - sha.update(storedBinaryData) - currentHash = sha.digest() - sha = hashlib.new('sha512') - sha.update(currentHash) - checksum = sha.digest()[0:4] - - # FIXME: encodeBase58 should take binary data, to reduce conversions - # encodeBase58(storedBinaryData + checksum) - asInt = int(hexlify(storedBinaryData) + hexlify(checksum), 16) - # should it be str? 
If yes, it should be everywhere in the code - return 'BM-' + encodeBase58(asInt) - - -def decodeAddress(address): - """ - returns (status, address version number, stream number, - data (almost certainly a ripe hash)) - """ - # pylint: disable=too-many-return-statements,too-many-statements - # pylint: disable=too-many-branches - - address = str(address).strip() - - if address[:3] == 'BM-': - integer = decodeBase58(address[3:]) - else: - integer = decodeBase58(address) - if integer == 0: - status = 'invalidcharacters' - return status, 0, 0, '' - # after converting to hex, the string will be prepended - # with a 0x and appended with a L in python2 - hexdata = hex(integer)[2:].rstrip('L') - - if len(hexdata) % 2 != 0: - hexdata = '0' + hexdata - - data = unhexlify(hexdata) - checksum = data[-4:] - - sha = hashlib.new('sha512') - sha.update(data[:-4]) - currentHash = sha.digest() - sha = hashlib.new('sha512') - sha.update(currentHash) - - if checksum != sha.digest()[0:4]: - status = 'checksumfailed' - return status, 0, 0, '' - - try: - addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9]) - except varintDecodeError as e: - logger.error(str(e)) - status = 'varintmalformed' - return status, 0, 0, '' - - if addressVersionNumber > 4: - logger.error('cannot decode address version numbers this high') - status = 'versiontoohigh' - return status, 0, 0, '' - elif addressVersionNumber == 0: - logger.error('cannot decode address version numbers of zero.') - status = 'versiontoohigh' - return status, 0, 0, '' - - try: - streamNumber, bytesUsedByStreamNumber = \ - decodeVarint(data[bytesUsedByVersionNumber:]) - except varintDecodeError as e: - logger.error(str(e)) - status = 'varintmalformed' - return status, 0, 0, '' - - status = 'success' - if addressVersionNumber == 1: - return status, addressVersionNumber, streamNumber, data[-24:-4] - elif addressVersionNumber == 2 or addressVersionNumber == 3: - embeddedRipeData = \ - data[bytesUsedByVersionNumber + 
bytesUsedByStreamNumber:-4] - if len(embeddedRipeData) == 19: - return status, addressVersionNumber, streamNumber, \ - b'\x00' + embeddedRipeData - elif len(embeddedRipeData) == 20: - return status, addressVersionNumber, streamNumber, \ - embeddedRipeData - elif len(embeddedRipeData) == 18: - return status, addressVersionNumber, streamNumber, \ - b'\x00\x00' + embeddedRipeData - elif len(embeddedRipeData) < 18: - return 'ripetooshort', 0, 0, '' - elif len(embeddedRipeData) > 20: - return 'ripetoolong', 0, 0, '' - return 'otherproblem', 0, 0, '' - elif addressVersionNumber == 4: - embeddedRipeData = \ - data[bytesUsedByVersionNumber + bytesUsedByStreamNumber:-4] - if embeddedRipeData[0:1] == b'\x00': - # In order to enforce address non-malleability, encoded - # RIPE data must have NULL bytes removed from the front - return 'encodingproblem', 0, 0, '' - elif len(embeddedRipeData) > 20: - return 'ripetoolong', 0, 0, '' - elif len(embeddedRipeData) < 4: - return 'ripetooshort', 0, 0, '' - x00string = b'\x00' * (20 - len(embeddedRipeData)) - return status, addressVersionNumber, streamNumber, \ - x00string + embeddedRipeData - - -def addBMIfNotPresent(address): - """Prepend BM- to an address if it doesn't already have it""" - address = str(address).strip() - return address if address[:3] == 'BM-' else 'BM-' + address diff --git a/src/tests/mock/pybitmessage/api.py b/src/tests/mock/pybitmessage/api.py deleted file mode 100644 index de220cc4..00000000 --- a/src/tests/mock/pybitmessage/api.py +++ /dev/null @@ -1,1537 +0,0 @@ -# Copyright (c) 2012-2016 Jonathan Warren -# Copyright (c) 2012-2020 The Bitmessage developers - -""" -This is not what you run to start the Bitmessage API. -Instead, `enable the API `_ -and optionally `enable daemon mode `_ -then run the PyBitmessage. - -The PyBitmessage API is provided either as -`XML-RPC `_ or -`JSON-RPC `_ like in bitcoin. -It's selected according to 'apivariant' setting in config file. 
- -Special value ``apivariant=legacy`` is to mimic the old pre 0.6.3 -behaviour when any results are returned as strings of json. - -.. list-table:: All config settings related to API: - :header-rows: 0 - - * - apienabled = true - - if 'false' the `singleAPI` wont start - * - apiinterface = 127.0.0.1 - - this is the recommended default - * - apiport = 8442 - - the API listens apiinterface:apiport if apiport is not used, - random in range (32767, 65535) otherwice - * - apivariant = xml - - current default for backward compatibility, 'json' is recommended - * - apiusername = username - - set the username - * - apipassword = password - - and the password - * - apinotifypath = - - not really the API setting, this sets a path for the executable to be ran - when certain internal event happens - -To use the API concider such simple example: - -.. code-block:: python - - import jsonrpclib - - from pybitmessage import bmconfigparser, helper_startup - - helper_startup.loadConfig() # find and load local config file - conf = bmconfigparser.BMConfigParser() - api_uri = "http://%s:%s@127.0.0.1:8442/" % ( - conf.safeGet('bitmessagesettings', 'apiusername'), - conf.safeGet('bitmessagesettings', 'apipassword') - ) - api = jsonrpclib.ServerProxy(api_uri) - print(api.clientStatus()) - - -For further examples please reference `.tests.test_api`. 
-""" - -import base64 -import ConfigParser -import errno -import hashlib -import httplib -import json -import random # nosec -import socket -import subprocess -import time -import xmlrpclib -from binascii import hexlify, unhexlify -from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer -from struct import pack - -import defaults -import helper_inbox -import helper_sent -import network.stats -import proofofwork -import queues -import shared -import shutdown -import state -from addresses import ( - addBMIfNotPresent, - calculateInventoryHash, - decodeAddress, - decodeVarint, - varintDecodeError -) -from bmconfigparser import BMConfigParser -from debug import logger -from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery, sqlStoredProcedure, sql_ready -from inventory import Inventory -from network.threads import StoppableThread -from six.moves import queue -from version import softwareVersion - -try: # TODO: write tests for XML vulnerabilities - from defusedxml.xmlrpc import monkey_patch -except ImportError: - logger.warning( - 'defusedxml not available, only use API on a secure, closed network.') -else: - monkey_patch() - - -str_chan = '[chan]' -str_broadcast_subscribers = '[Broadcast subscribers]' - - -class ErrorCodes(type): - """Metaclass for :class:`APIError` documenting error codes.""" - _CODES = { - 0: 'Invalid command parameters number', - 1: 'The specified passphrase is blank.', - 2: 'The address version number currently must be 3, 4, or 0' - ' (which means auto-select).', - 3: 'The stream number must be 1 (or 0 which means' - ' auto-select). Others aren\'t supported.', - 4: 'Why would you ask me to generate 0 addresses for you?', - 5: 'You have (accidentally?) specified too many addresses to' - ' make. Maximum 999. 
This check only exists to prevent' - ' mischief; if you really want to create more addresses than' - ' this, contact the Bitmessage developers and we can modify' - ' the check or you can do it yourself by searching the source' - ' code for this message.', - 6: 'The encoding type must be 2 or 3.', - 7: 'Could not decode address', - 8: 'Checksum failed for address', - 9: 'Invalid characters in address', - 10: 'Address version number too high (or zero)', - 11: 'The address version number currently must be 2, 3 or 4.' - ' Others aren\'t supported. Check the address.', - 12: 'The stream number must be 1. Others aren\'t supported.' - ' Check the address.', - 13: 'Could not find this address in your keys.dat file.', - 14: 'Your fromAddress is disabled. Cannot send.', - 15: 'Invalid ackData object size.', - 16: 'You are already subscribed to that address.', - 17: 'Label is not valid UTF-8 data.', - 18: 'Chan name does not match address.', - 19: 'The length of hash should be 32 bytes (encoded in hex' - ' thus 64 characters).', - 20: 'Invalid method:', - 21: 'Unexpected API Failure', - 22: 'Decode error', - 23: 'Bool expected in eighteenByteRipe', - 24: 'Chan address is already present.', - 25: 'Specified address is not a chan address.' - ' Use deleteAddress API call instead.', - 26: 'Malformed varint in address: ', - 27: 'Message is too long.' - } - - def __new__(mcs, name, bases, namespace): - result = super(ErrorCodes, mcs).__new__(mcs, name, bases, namespace) - for code in mcs._CODES.iteritems(): - # beware: the formatting is adjusted for list-table - result.__doc__ += """ * - %04i - - %s - """ % code - return result - - -class APIError(xmlrpclib.Fault): - """ - APIError exception class - - .. 
list-table:: Possible error values - :header-rows: 1 - :widths: auto - - * - Error Number - - Message - """ - __metaclass__ = ErrorCodes - - def __str__(self): - return "API Error %04i: %s" % (self.faultCode, self.faultString) - - -# This thread, of which there is only one, runs the API. -class singleAPI(StoppableThread): - """API thread""" - - name = "singleAPI" - - def stopThread(self): - super(singleAPI, self).stopThread() - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.connect(( - BMConfigParser().get('bitmessagesettings', 'apiinterface'), - BMConfigParser().getint('bitmessagesettings', 'apiport') - )) - s.shutdown(socket.SHUT_RDWR) - s.close() - except BaseException: - pass - - def run(self): - """ - The instance of `SimpleXMLRPCServer.SimpleXMLRPCServer` or - :class:`jsonrpclib.SimpleJSONRPCServer` is created and started here - with `BMRPCDispatcher` dispatcher. - """ - port = BMConfigParser().getint('bitmessagesettings', 'apiport') - try: - getattr(errno, 'WSAEADDRINUSE') - except AttributeError: - errno.WSAEADDRINUSE = errno.EADDRINUSE - - RPCServerBase = SimpleXMLRPCServer - ct = 'text/xml' - if BMConfigParser().safeGet( - 'bitmessagesettings', 'apivariant') == 'json': - try: - from jsonrpclib.SimpleJSONRPCServer import ( - SimpleJSONRPCServer as RPCServerBase) - except ImportError: - logger.warning( - 'jsonrpclib not available, failing back to XML-RPC') - else: - ct = 'application/json-rpc' - - # Nested class. FIXME not found a better solution. 
- class StoppableRPCServer(RPCServerBase): - """A SimpleXMLRPCServer that honours state.shutdown""" - allow_reuse_address = True - content_type = ct - - def serve_forever(self, poll_interval=None): - """Start the RPCServer""" - sql_ready.wait() - while state.shutdown == 0: - self.handle_request() - - for attempt in range(50): - try: - if attempt > 0: - logger.warning( - 'Failed to start API listener on port %s', port) - port = random.randint(32767, 65535) - se = StoppableRPCServer( - (BMConfigParser().get( - 'bitmessagesettings', 'apiinterface'), - port), - BMXMLRPCRequestHandler, True, encoding='UTF-8') - except socket.error as e: - if e.errno in (errno.EADDRINUSE, errno.WSAEADDRINUSE): - continue - else: - if attempt > 0: - logger.warning('Setting apiport to %s', port) - BMConfigParser().set( - 'bitmessagesettings', 'apiport', str(port)) - BMConfigParser().save() - break - - se.register_instance(BMRPCDispatcher()) - se.register_introspection_functions() - - apiNotifyPath = BMConfigParser().safeGet( - 'bitmessagesettings', 'apinotifypath') - - if apiNotifyPath: - logger.info('Trying to call %s', apiNotifyPath) - try: - subprocess.call([apiNotifyPath, "startingUp"]) - except OSError: - logger.warning( - 'Failed to call %s, removing apinotifypath setting', - apiNotifyPath) - BMConfigParser().remove_option( - 'bitmessagesettings', 'apinotifypath') - - se.serve_forever() - - -class CommandHandler(type): - """ - The metaclass for `BMRPCDispatcher` which fills _handlers dict by - methods decorated with @command - """ - def __new__(mcs, name, bases, namespace): - # pylint: disable=protected-access - result = super(CommandHandler, mcs).__new__( - mcs, name, bases, namespace) - result.config = BMConfigParser() - result._handlers = {} - apivariant = result.config.safeGet('bitmessagesettings', 'apivariant') - for func in namespace.values(): - try: - for alias in getattr(func, '_cmd'): - try: - prefix, alias = alias.split(':') - if apivariant != prefix: - continue - except 
ValueError: - pass - result._handlers[alias] = func - except AttributeError: - pass - return result - - -class testmode(object): # pylint: disable=too-few-public-methods - """Decorator to check testmode & route to command decorator""" - - def __init__(self, *aliases): - self.aliases = aliases - - def __call__(self, func): - """Testmode call method""" - - if not state.testmode: - return None - return command(self.aliases[0]).__call__(func) - - -class command(object): # pylint: disable=too-few-public-methods - """Decorator for API command method""" - def __init__(self, *aliases): - self.aliases = aliases - - def __call__(self, func): - - if BMConfigParser().safeGet( - 'bitmessagesettings', 'apivariant') == 'legacy': - def wrapper(*args): - """ - A wrapper for legacy apivariant which dumps the result - into string of json - """ - result = func(*args) - return result if isinstance(result, (int, str)) \ - else json.dumps(result, indent=4) - wrapper.__doc__ = func.__doc__ - else: - wrapper = func - # pylint: disable=protected-access - wrapper._cmd = self.aliases - wrapper.__doc__ = """Commands: *%s* - - """ % ', '.join(self.aliases) + wrapper.__doc__.lstrip() - return wrapper - - -# This is one of several classes that constitute the API -# This class was written by Vaibhav Bhatia. -# Modified by Jonathan Warren (Atheros). -# Further modified by the Bitmessage developers -# http://code.activestate.com/recipes/501148 -class BMXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - """The main API handler""" - - # pylint: disable=protected-access - def do_POST(self): - """ - Handles the HTTP POST request. - - Attempts to interpret all HTTP POST requests as XML-RPC calls, - which are forwarded to the server's _dispatch method for handling. - - .. 
note:: this method is the same as in - `SimpleXMLRPCServer.SimpleXMLRPCRequestHandler`, - just hacked to handle cookies - """ - - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - try: - # Get arguments by reading body of request. - # We read this in chunks to avoid straining - # socket.read(); around the 10 or 15Mb mark, some platforms - # begin to have problems (bug #792570). - max_chunk_size = 10 * 1024 * 1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) - size_remaining -= len(L[-1]) - data = ''.join(L) - - # pylint: disable=attribute-defined-outside-init - self.cookies = [] - - validuser = self.APIAuthenticateClient() - if not validuser: - time.sleep(2) - self.send_response(httplib.UNAUTHORIZED) - self.end_headers() - return - # "RPC Username or password incorrect or HTTP header" - # " lacks authentication at all." - else: - # In previous versions of SimpleXMLRPCServer, _dispatch - # could be overridden in this class, instead of in - # SimpleXMLRPCDispatcher. To maintain backwards compatibility, - # check to see if a subclass implements _dispatch and dispatch - # using that method if present. 
- - response = self.server._marshaled_dispatch( - data, getattr(self, '_dispatch', None) - ) - except Exception: # This should only happen if the module is buggy - # internal error, report as HTTP server error - self.send_response(httplib.INTERNAL_SERVER_ERROR) - self.end_headers() - else: - # got a valid XML RPC response - self.send_response(httplib.OK) - self.send_header("Content-type", self.server.content_type) - self.send_header("Content-length", str(len(response))) - - # HACK :start -> sends cookies here - if self.cookies: - for cookie in self.cookies: - self.send_header('Set-Cookie', cookie.output(header='')) - # HACK :end - - self.end_headers() - self.wfile.write(response) - - # shut down the connection - self.wfile.flush() - self.connection.shutdown(1) - - # actually handle shutdown command after sending response - if state.shutdown is False: - shutdown.doCleanShutdown() - - def APIAuthenticateClient(self): - """ - Predicate to check for valid API credentials in the request header - """ - - if 'Authorization' in self.headers: - # handle Basic authentication - encstr = self.headers.get('Authorization').split()[1] - emailid, password = encstr.decode('base64').split(':') - return ( - emailid == BMConfigParser().get( - 'bitmessagesettings', 'apiusername' - ) and password == BMConfigParser().get( - 'bitmessagesettings', 'apipassword')) - else: - logger.warning( - 'Authentication failed because header lacks' - ' Authentication field') - time.sleep(2) - - return False - - -# pylint: disable=no-self-use,no-member,too-many-public-methods -class BMRPCDispatcher(object): - """This class is used to dispatch API commands""" - __metaclass__ = CommandHandler - - @staticmethod - def _decode(text, decode_type): - try: - if decode_type == 'hex': - return unhexlify(text) - elif decode_type == 'base64': - return base64.b64decode(text) - except Exception as e: - raise APIError( - 22, 'Decode error - %s. 
Had trouble while decoding string: %r' - % (e, text) - ) - - def _verifyAddress(self, address): - status, addressVersionNumber, streamNumber, ripe = \ - decodeAddress(address) - if status != 'success': - if status == 'checksumfailed': - raise APIError(8, 'Checksum failed for address: ' + address) - if status == 'invalidcharacters': - raise APIError(9, 'Invalid characters in address: ' + address) - if status == 'versiontoohigh': - raise APIError( - 10, 'Address version number too high (or zero) in address: ' - + address) - if status == 'varintmalformed': - raise APIError(26, 'Malformed varint in address: ' + address) - raise APIError( - 7, 'Could not decode address: %s : %s' % (address, status)) - if addressVersionNumber < 2 or addressVersionNumber > 4: - raise APIError( - 11, 'The address version number currently must be 2, 3 or 4.' - ' Others aren\'t supported. Check the address.' - ) - if streamNumber != 1: - raise APIError( - 12, 'The stream number must be 1. Others aren\'t supported.' - ' Check the address.' 
- ) - - return { - 'status': status, - 'addressVersion': addressVersionNumber, - 'streamNumber': streamNumber, - 'ripe': base64.b64encode(ripe) - } if self._method == 'decodeAddress' else ( - status, addressVersionNumber, streamNumber, ripe) - - @staticmethod - def _dump_inbox_message( # pylint: disable=too-many-arguments - msgid, toAddress, fromAddress, subject, received, - message, encodingtype, read): - subject = shared.fixPotentiallyInvalidUTF8Data(subject) - message = shared.fixPotentiallyInvalidUTF8Data(message) - return { - 'msgid': hexlify(msgid), - 'toAddress': toAddress, - 'fromAddress': fromAddress, - 'subject': base64.b64encode(subject), - 'message': base64.b64encode(message), - 'encodingType': encodingtype, - 'receivedTime': received, - 'read': read - } - - @staticmethod - def _dump_sent_message( # pylint: disable=too-many-arguments - msgid, toAddress, fromAddress, subject, lastactiontime, - message, encodingtype, status, ackdata): - subject = shared.fixPotentiallyInvalidUTF8Data(subject) - message = shared.fixPotentiallyInvalidUTF8Data(message) - return { - 'msgid': hexlify(msgid), - 'toAddress': toAddress, - 'fromAddress': fromAddress, - 'subject': base64.b64encode(subject), - 'message': base64.b64encode(message), - 'encodingType': encodingtype, - 'lastActionTime': lastactiontime, - 'status': status, - 'ackData': hexlify(ackdata) - } - - # Request Handlers - - @command('decodeAddress') - def HandleDecodeAddress(self, address): - """ - Decode given address and return dict with - status, addressVersion, streamNumber and ripe keys - """ - return self._verifyAddress(address) - - @command('listAddresses', 'listAddresses2') - def HandleListAddresses(self): - """ - Returns dict with a list of all used addresses with their properties - in the *addresses* key. 
- """ - data = [] - for address in self.config.addresses(): - streamNumber = decodeAddress(address)[2] - label = self.config.get(address, 'label') - if self._method == 'listAddresses2': - label = base64.b64encode(label) - data.append({ - 'label': label, - 'address': address, - 'stream': streamNumber, - 'enabled': self.config.safeGetBoolean(address, 'enabled'), - 'chan': self.config.safeGetBoolean(address, 'chan') - }) - return {'addresses': data} - - # the listAddressbook alias should be removed eventually. - @command('listAddressBookEntries', 'legacy:listAddressbook') - def HandleListAddressBookEntries(self, label=None): - """ - Returns dict with a list of all address book entries (address and label) - in the *addresses* key. - """ - queryreturn = sqlQuery( - "SELECT label, address from addressbook WHERE label = ?", - label - ) if label else sqlQuery("SELECT label, address from addressbook") - data = [] - for label, address in queryreturn: - label = shared.fixPotentiallyInvalidUTF8Data(label) - data.append({ - 'label': base64.b64encode(label), - 'address': address - }) - return {'addresses': data} - - # the addAddressbook alias should be deleted eventually. - @command('addAddressBookEntry', 'legacy:addAddressbook') - def HandleAddAddressBookEntry(self, address, label): - """Add an entry to address book. 
label must be base64 encoded.""" - label = self._decode(label, "base64") - address = addBMIfNotPresent(address) - self._verifyAddress(address) - # TODO: add unique together constraint in the table - queryreturn = sqlQuery( - "SELECT address FROM addressbook WHERE address=?", address) - if queryreturn != []: - raise APIError( - 16, 'You already have this address in your address book.') - - sqlExecute("INSERT INTO addressbook VALUES(?,?)", label, address) - queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) - queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) - queues.UISignalQueue.put(('rerenderAddressBook', '')) - return "Added address %s to address book" % address - - # the deleteAddressbook alias should be deleted eventually. - @command('deleteAddressBookEntry', 'legacy:deleteAddressbook') - def HandleDeleteAddressBookEntry(self, address): - """Delete an entry from address book.""" - address = addBMIfNotPresent(address) - self._verifyAddress(address) - sqlExecute('DELETE FROM addressbook WHERE address=?', address) - queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) - queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) - queues.UISignalQueue.put(('rerenderAddressBook', '')) - return "Deleted address book entry for %s if it existed" % address - - @command('createRandomAddress') - def HandleCreateRandomAddress( - self, label, eighteenByteRipe=False, totalDifficulty=0, - smallMessageDifficulty=0 - ): - """ - Create one address using the random number generator. - - :param str label: base64 encoded label for the address - :param bool eighteenByteRipe: is telling Bitmessage whether to - generate an address with an 18 byte RIPE hash - (as opposed to a 19 byte hash). 
- """ - - nonceTrialsPerByte = self.config.get( - 'bitmessagesettings', 'defaultnoncetrialsperbyte' - ) if not totalDifficulty else int( - defaults.networkDefaultProofOfWorkNonceTrialsPerByte - * totalDifficulty) - payloadLengthExtraBytes = self.config.get( - 'bitmessagesettings', 'defaultpayloadlengthextrabytes' - ) if not smallMessageDifficulty else int( - defaults.networkDefaultPayloadLengthExtraBytes - * smallMessageDifficulty) - - if not isinstance(eighteenByteRipe, bool): - raise APIError( - 23, 'Bool expected in eighteenByteRipe, saw %s instead' - % type(eighteenByteRipe)) - label = self._decode(label, "base64") - try: - label.decode('utf-8') - except UnicodeDecodeError: - raise APIError(17, 'Label is not valid UTF-8 data.') - queues.apiAddressGeneratorReturnQueue.queue.clear() - # FIXME hard coded stream no - streamNumberForAddress = 1 - queues.addressGeneratorQueue.put(( - 'createRandomAddress', 4, streamNumberForAddress, label, 1, "", - eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes - )) - return queues.apiAddressGeneratorReturnQueue.get() - - # pylint: disable=too-many-arguments - @command('createDeterministicAddresses') - def HandleCreateDeterministicAddresses( - self, passphrase, numberOfAddresses=1, addressVersionNumber=0, - streamNumber=0, eighteenByteRipe=False, totalDifficulty=0, - smallMessageDifficulty=0 - ): - """ - Create many addresses deterministically using the passphrase. - - :param str passphrase: base64 encoded passphrase - :param int numberOfAddresses: number of addresses to create, - up to 999 - - *addressVersionNumber* and *streamNumber* may be set to 0 - which will tell Bitmessage to use the most up-to-date - address version and the most available stream. 
- """ - - nonceTrialsPerByte = self.config.get( - 'bitmessagesettings', 'defaultnoncetrialsperbyte' - ) if not totalDifficulty else int( - defaults.networkDefaultProofOfWorkNonceTrialsPerByte - * totalDifficulty) - payloadLengthExtraBytes = self.config.get( - 'bitmessagesettings', 'defaultpayloadlengthextrabytes' - ) if not smallMessageDifficulty else int( - defaults.networkDefaultPayloadLengthExtraBytes - * smallMessageDifficulty) - - if not passphrase: - raise APIError(1, 'The specified passphrase is blank.') - if not isinstance(eighteenByteRipe, bool): - raise APIError( - 23, 'Bool expected in eighteenByteRipe, saw %s instead' - % type(eighteenByteRipe)) - passphrase = self._decode(passphrase, "base64") - # 0 means "just use the proper addressVersionNumber" - if addressVersionNumber == 0: - addressVersionNumber = 4 - if addressVersionNumber not in (3, 4): - raise APIError( - 2, 'The address version number currently must be 3, 4, or 0' - ' (which means auto-select). %i isn\'t supported.' - % addressVersionNumber) - if streamNumber == 0: # 0 means "just use the most available stream" - streamNumber = 1 # FIXME hard coded stream no - if streamNumber != 1: - raise APIError( - 3, 'The stream number must be 1 (or 0 which means' - ' auto-select). Others aren\'t supported.') - if numberOfAddresses == 0: - raise APIError( - 4, 'Why would you ask me to generate 0 addresses for you?') - if numberOfAddresses > 999: - raise APIError( - 5, 'You have (accidentally?) specified too many addresses to' - ' make. Maximum 999. 
This check only exists to prevent' - ' mischief; if you really want to create more addresses than' - ' this, contact the Bitmessage developers and we can modify' - ' the check or you can do it yourself by searching the source' - ' code for this message.') - queues.apiAddressGeneratorReturnQueue.queue.clear() - logger.debug( - 'Requesting that the addressGenerator create %s addresses.', - numberOfAddresses) - queues.addressGeneratorQueue.put(( - 'createDeterministicAddresses', addressVersionNumber, streamNumber, - 'unused API address', numberOfAddresses, passphrase, - eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes - )) - - return {'addresses': queues.apiAddressGeneratorReturnQueue.get()} - - @command('getDeterministicAddress') - def HandleGetDeterministicAddress( - self, passphrase, addressVersionNumber, streamNumber): - """ - Similar to *createDeterministicAddresses* except that the one - address that is returned will not be added to the Bitmessage - user interface or the keys.dat file. - """ - - numberOfAddresses = 1 - eighteenByteRipe = False - if not passphrase: - raise APIError(1, 'The specified passphrase is blank.') - passphrase = self._decode(passphrase, "base64") - if addressVersionNumber not in (3, 4): - raise APIError( - 2, 'The address version number currently must be 3 or 4. %i' - ' isn\'t supported.' % addressVersionNumber) - if streamNumber != 1: - raise APIError( - 3, ' The stream number must be 1. Others aren\'t supported.') - queues.apiAddressGeneratorReturnQueue.queue.clear() - logger.debug( - 'Requesting that the addressGenerator create %s addresses.', - numberOfAddresses) - queues.addressGeneratorQueue.put(( - 'getDeterministicAddress', addressVersionNumber, streamNumber, - 'unused API address', numberOfAddresses, passphrase, - eighteenByteRipe - )) - return queues.apiAddressGeneratorReturnQueue.get() - - @command('createChan') - def HandleCreateChan(self, passphrase): - """ - Creates a new chan. passphrase must be base64 encoded. 
- Returns the corresponding Bitmessage address. - """ - - passphrase = self._decode(passphrase, "base64") - if not passphrase: - raise APIError(1, 'The specified passphrase is blank.') - # It would be nice to make the label the passphrase but it is - # possible that the passphrase contains non-utf-8 characters. - try: - passphrase.decode('utf-8') - label = str_chan + ' ' + passphrase - except UnicodeDecodeError: - label = str_chan + ' ' + repr(passphrase) - - addressVersionNumber = 4 - streamNumber = 1 - queues.apiAddressGeneratorReturnQueue.queue.clear() - logger.debug( - 'Requesting that the addressGenerator create chan %s.', passphrase) - queues.addressGeneratorQueue.put(( - 'createChan', addressVersionNumber, streamNumber, label, - passphrase, True - )) - queueReturn = queues.apiAddressGeneratorReturnQueue.get() - try: - return queueReturn[0] - except IndexError: - raise APIError(24, 'Chan address is already present.') - - @command('joinChan') - def HandleJoinChan(self, passphrase, suppliedAddress): - """ - Join a chan. passphrase must be base64 encoded. Returns 'success'. - """ - - passphrase = self._decode(passphrase, "base64") - if not passphrase: - raise APIError(1, 'The specified passphrase is blank.') - # It would be nice to make the label the passphrase but it is - # possible that the passphrase contains non-utf-8 characters. 
- try: - passphrase.decode('utf-8') - label = str_chan + ' ' + passphrase - except UnicodeDecodeError: - label = str_chan + ' ' + repr(passphrase) - - self._verifyAddress(suppliedAddress) - suppliedAddress = addBMIfNotPresent(suppliedAddress) - queues.apiAddressGeneratorReturnQueue.queue.clear() - queues.addressGeneratorQueue.put(( - 'joinChan', suppliedAddress, label, passphrase, True - )) - queueReturn = queues.apiAddressGeneratorReturnQueue.get() - try: - if queueReturn[0] == 'chan name does not match address': - raise APIError(18, 'Chan name does not match address.') - except IndexError: - raise APIError(24, 'Chan address is already present.') - - return "success" - - @command('leaveChan') - def HandleLeaveChan(self, address): - """ - Leave a chan. Returns 'success'. - - .. note:: at this time, the address is still shown in the UI - until a restart. - """ - self._verifyAddress(address) - address = addBMIfNotPresent(address) - if not self.config.safeGetBoolean(address, 'chan'): - raise APIError( - 25, 'Specified address is not a chan address.' - ' Use deleteAddress API call instead.') - try: - self.config.remove_section(address) - except ConfigParser.NoSectionError: - raise APIError( - 13, 'Could not find this address in your keys.dat file.') - self.config.save() - queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) - queues.UISignalQueue.put(('rerenderMessagelistToLabels', '')) - return "success" - - @command('deleteAddress') - def HandleDeleteAddress(self, address): - """ - Permanently delete the address from keys.dat file. Returns 'success'. 
- """ - self._verifyAddress(address) - address = addBMIfNotPresent(address) - try: - self.config.remove_section(address) - except ConfigParser.NoSectionError: - raise APIError( - 13, 'Could not find this address in your keys.dat file.') - self.config.save() - queues.UISignalQueue.put(('writeNewAddressToTable', ('', '', ''))) - shared.reloadMyAddressHashes() - return "success" - - @command('getAllInboxMessages') - def HandleGetAllInboxMessages(self): - """ - Returns a dict with all inbox messages in the *inboxMessages* key. - The message is a dict with such keys: - *msgid*, *toAddress*, *fromAddress*, *subject*, *message*, - *encodingType*, *receivedTime*, *read*. - *msgid* is hex encoded string. - *subject* and *message* are base64 encoded. - """ - - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, received, message," - " encodingtype, read FROM inbox WHERE folder='inbox'" - " ORDER BY received" - ) - return {"inboxMessages": [ - self._dump_inbox_message(*data) for data in queryreturn - ]} - - @command('getAllInboxMessageIds', 'getAllInboxMessageIDs') - def HandleGetAllInboxMessageIds(self): - """ - The same as *getAllInboxMessages* but returns only *msgid*s, - result key - *inboxMessageIds*. - """ - - queryreturn = sqlQuery( - "SELECT msgid FROM inbox where folder='inbox' ORDER BY received") - - return {"inboxMessageIds": [ - {'msgid': hexlify(msgid)} for msgid, in queryreturn - ]} - - @command('getInboxMessageById', 'getInboxMessageByID') - def HandleGetInboxMessageById(self, hid, readStatus=None): - """ - Returns a dict with list containing single message in the result - key *inboxMessage*. May also return None if message was not found. - - :param str hid: hex encoded msgid - :param bool readStatus: sets the message's read status if present - """ - - msgid = self._decode(hid, "hex") - if readStatus is not None: - if not isinstance(readStatus, bool): - raise APIError( - 23, 'Bool expected in readStatus, saw %s instead.' 
- % type(readStatus)) - queryreturn = sqlQuery( - "SELECT read FROM inbox WHERE msgid=?", msgid) - # UPDATE is slow, only update if status is different - try: - if (queryreturn[0][0] == 1) != readStatus: - sqlExecute( - "UPDATE inbox set read = ? WHERE msgid=?", - readStatus, msgid) - queues.UISignalQueue.put(('changedInboxUnread', None)) - except IndexError: - pass - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, received, message," - " encodingtype, read FROM inbox WHERE msgid=?", msgid - ) - try: - return {"inboxMessage": [ - self._dump_inbox_message(*queryreturn[0])]} - except IndexError: - pass # FIXME inconsistent - - @command('getAllSentMessages') - def HandleGetAllSentMessages(self): - """ - The same as *getAllInboxMessages* but for sent, - result key - *sentMessages*. Message dict keys are: - *msgid*, *toAddress*, *fromAddress*, *subject*, *message*, - *encodingType*, *lastActionTime*, *status*, *ackData*. - *ackData* is also a hex encoded string. - """ - - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," - " message, encodingtype, status, ackdata FROM sent" - " WHERE folder='sent' ORDER BY lastactiontime" - ) - return {"sentMessages": [ - self._dump_sent_message(*data) for data in queryreturn - ]} - - @command('getAllSentMessageIds', 'getAllSentMessageIDs') - def HandleGetAllSentMessageIds(self): - """ - The same as *getAllInboxMessageIds* but for sent, - result key - *sentMessageIds*. - """ - - queryreturn = sqlQuery( - "SELECT msgid FROM sent WHERE folder='sent'" - " ORDER BY lastactiontime" - ) - return {"sentMessageIds": [ - {'msgid': hexlify(msgid)} for msgid, in queryreturn - ]} - - # after some time getInboxMessagesByAddress should be removed - @command('getInboxMessagesByReceiver', 'legacy:getInboxMessagesByAddress') - def HandleInboxMessagesByReceiver(self, toAddress): - """ - The same as *getAllInboxMessages* but returns only messages - for toAddress. 
- """ - - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, received," - " message, encodingtype, read FROM inbox WHERE folder='inbox'" - " AND toAddress=?", toAddress) - return {"inboxMessages": [ - self._dump_inbox_message(*data) for data in queryreturn - ]} - - @command('getSentMessageById', 'getSentMessageByID') - def HandleGetSentMessageById(self, hid): - """ - Similiar to *getInboxMessageById* but doesn't change message's - read status (sent messages have no such field). - Result key is *sentMessage* - """ - - msgid = self._decode(hid, "hex") - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," - " message, encodingtype, status, ackdata FROM sent WHERE msgid=?", - msgid - ) - try: - return {"sentMessage": [ - self._dump_sent_message(*queryreturn[0]) - ]} - except IndexError: - pass # FIXME inconsistent - - @command('getSentMessagesByAddress', 'getSentMessagesBySender') - def HandleGetSentMessagesByAddress(self, fromAddress): - """ - The same as *getAllSentMessages* but returns only messages - from fromAddress. - """ - - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," - " message, encodingtype, status, ackdata FROM sent" - " WHERE folder='sent' AND fromAddress=? ORDER BY lastactiontime", - fromAddress - ) - return {"sentMessages": [ - self._dump_sent_message(*data) for data in queryreturn - ]} - - @command('getSentMessageByAckData') - def HandleGetSentMessagesByAckData(self, ackData): - """ - Similiar to *getSentMessageById* but searches by ackdata - (also hex encoded). 
- """ - - ackData = self._decode(ackData, "hex") - queryreturn = sqlQuery( - "SELECT msgid, toaddress, fromaddress, subject, lastactiontime," - " message, encodingtype, status, ackdata FROM sent" - " WHERE ackdata=?", ackData - ) - - try: - return {"sentMessage": [ - self._dump_sent_message(*queryreturn[0]) - ]} - except IndexError: - pass # FIXME inconsistent - - @command('trashMessage') - def HandleTrashMessage(self, msgid): - """ - Trash message by msgid (encoded in hex). Returns a simple message - saying that the message was trashed assuming it ever even existed. - Prior existence is not checked. - """ - msgid = self._decode(msgid, "hex") - # Trash if in inbox table - helper_inbox.trash(msgid) - # Trash if in sent table - sqlExecute("UPDATE sent SET folder='trash' WHERE msgid=?", msgid) - return 'Trashed message (assuming message existed).' - - @command('trashInboxMessage') - def HandleTrashInboxMessage(self, msgid): - """Trash inbox message by msgid (encoded in hex).""" - msgid = self._decode(msgid, "hex") - helper_inbox.trash(msgid) - return 'Trashed inbox message (assuming message existed).' - - @command('trashSentMessage') - def HandleTrashSentMessage(self, msgid): - """Trash sent message by msgid (encoded in hex).""" - msgid = self._decode(msgid, "hex") - sqlExecute('''UPDATE sent SET folder='trash' WHERE msgid=?''', msgid) - return 'Trashed sent message (assuming message existed).' - - @command('sendMessage') - def HandleSendMessage( - self, toAddress, fromAddress, subject, message, - encodingType=2, TTL=4 * 24 * 60 * 60 - ): - """ - Send the message and return ackdata (hex encoded string). - subject and message must be encoded in base64 which may optionally - include line breaks. TTL is specified in seconds; values outside - the bounds of 3600 to 2419200 will be moved to be within those - bounds. TTL defaults to 4 days. 
- """ - # pylint: disable=too-many-locals - if encodingType not in (2, 3): - raise APIError(6, 'The encoding type must be 2 or 3.') - subject = self._decode(subject, "base64") - message = self._decode(message, "base64") - if len(subject + message) > (2 ** 18 - 500): - raise APIError(27, 'Message is too long.') - if TTL < 60 * 60: - TTL = 60 * 60 - if TTL > 28 * 24 * 60 * 60: - TTL = 28 * 24 * 60 * 60 - toAddress = addBMIfNotPresent(toAddress) - fromAddress = addBMIfNotPresent(fromAddress) - self._verifyAddress(fromAddress) - try: - fromAddressEnabled = self.config.getboolean( - fromAddress, 'enabled') - except BaseException: - raise APIError( - 13, 'Could not find your fromAddress in the keys.dat file.') - if not fromAddressEnabled: - raise APIError(14, 'Your fromAddress is disabled. Cannot send.') - - ackdata = helper_sent.insert( - toAddress=toAddress, fromAddress=fromAddress, - subject=subject, message=message, encoding=encodingType, ttl=TTL) - - toLabel = '' - queryreturn = sqlQuery( - "SELECT label FROM addressbook WHERE address=?", toAddress) - try: - toLabel = queryreturn[0][0] - except IndexError: - pass - - queues.UISignalQueue.put(('displayNewSentMessage', ( - toAddress, toLabel, fromAddress, subject, message, ackdata))) - queues.workerQueue.put(('sendmessage', toAddress)) - - return hexlify(ackdata) - - @command('sendBroadcast') - def HandleSendBroadcast( - self, fromAddress, subject, message, encodingType=2, - TTL=4 * 24 * 60 * 60): - """Send the broadcast message. 
Similiar to *sendMessage*.""" - - if encodingType not in (2, 3): - raise APIError(6, 'The encoding type must be 2 or 3.') - - subject = self._decode(subject, "base64") - message = self._decode(message, "base64") - if len(subject + message) > (2 ** 18 - 500): - raise APIError(27, 'Message is too long.') - if TTL < 60 * 60: - TTL = 60 * 60 - if TTL > 28 * 24 * 60 * 60: - TTL = 28 * 24 * 60 * 60 - fromAddress = addBMIfNotPresent(fromAddress) - self._verifyAddress(fromAddress) - try: - self.config.getboolean(fromAddress, 'enabled') - except BaseException: - raise APIError( - 13, 'Could not find your fromAddress in the keys.dat file.') - toAddress = str_broadcast_subscribers - - ackdata = helper_sent.insert( - fromAddress=fromAddress, subject=subject, - message=message, status='broadcastqueued', - encoding=encodingType) - - toLabel = str_broadcast_subscribers - queues.UISignalQueue.put(('displayNewSentMessage', ( - toAddress, toLabel, fromAddress, subject, message, ackdata))) - queues.workerQueue.put(('sendbroadcast', '')) - - return hexlify(ackdata) - - @command('getStatus') - def HandleGetStatus(self, ackdata): - """ - Get the status of sent message by its ackdata (hex encoded). - Returns one of these strings: notfound, msgqueued, - broadcastqueued, broadcastsent, doingpubkeypow, awaitingpubkey, - doingmsgpow, forcepow, msgsent, msgsentnoackexpected or ackreceived. - """ - - if len(ackdata) < 76: - # The length of ackData should be at least 38 bytes (76 hex digits) - raise APIError(15, 'Invalid ackData object size.') - ackdata = self._decode(ackdata, "hex") - queryreturn = sqlQuery( - "SELECT status FROM sent where ackdata=?", ackdata) - try: - return queryreturn[0][0] - except IndexError: - return 'notfound' - - @command('addSubscription') - def HandleAddSubscription(self, address, label=''): - """Subscribe to the address. 
label must be base64 encoded.""" - - if label: - label = self._decode(label, "base64") - try: - label.decode('utf-8') - except UnicodeDecodeError: - raise APIError(17, 'Label is not valid UTF-8 data.') - self._verifyAddress(address) - address = addBMIfNotPresent(address) - # First we must check to see if the address is already in the - # subscriptions list. - queryreturn = sqlQuery( - "SELECT * FROM subscriptions WHERE address=?", address) - if queryreturn: - raise APIError(16, 'You are already subscribed to that address.') - sqlExecute( - "INSERT INTO subscriptions VALUES (?,?,?)", label, address, True) - shared.reloadBroadcastSendersForWhichImWatching() - queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) - queues.UISignalQueue.put(('rerenderSubscriptions', '')) - return 'Added subscription.' - - @command('deleteSubscription') - def HandleDeleteSubscription(self, address): - """ - Unsubscribe from the address. The program does not check whether - you were subscribed in the first place. - """ - - address = addBMIfNotPresent(address) - sqlExecute("DELETE FROM subscriptions WHERE address=?", address) - shared.reloadBroadcastSendersForWhichImWatching() - queues.UISignalQueue.put(('rerenderMessagelistFromLabels', '')) - queues.UISignalQueue.put(('rerenderSubscriptions', '')) - return 'Deleted subscription if it existed.' - - @command('listSubscriptions') - def ListSubscriptions(self): - """ - Returns dict with a list of all subscriptions - in the *subscriptions* key. 
- """ - - queryreturn = sqlQuery( - "SELECT label, address, enabled FROM subscriptions") - data = [] - for label, address, enabled in queryreturn: - label = shared.fixPotentiallyInvalidUTF8Data(label) - data.append({ - 'label': base64.b64encode(label), - 'address': address, - 'enabled': enabled == 1 - }) - return {'subscriptions': data} - - @command('disseminatePreEncryptedMsg') - def HandleDisseminatePreEncryptedMsg( - self, encryptedPayload, requiredAverageProofOfWorkNonceTrialsPerByte, - requiredPayloadLengthExtraBytes): - """Handle a request to disseminate an encrypted message""" - - # The device issuing this command to PyBitmessage supplies a msg - # object that has already been encrypted but which still needs the POW - # to be done. PyBitmessage accepts this msg object and sends it out - # to the rest of the Bitmessage network as if it had generated - # the message itself. Please do not yet add this to the api doc. - encryptedPayload = self._decode(encryptedPayload, "hex") - # Let us do the POW and attach it to the front - target = 2**64 / ( - ( - len(encryptedPayload) - + requiredPayloadLengthExtraBytes - + 8 - ) * requiredAverageProofOfWorkNonceTrialsPerByte) - logger.info( - '(For msg message via API) Doing proof of work. Total required' - ' difficulty: %s\nRequired small message difficulty: %s', - float(requiredAverageProofOfWorkNonceTrialsPerByte) - / defaults.networkDefaultProofOfWorkNonceTrialsPerByte, - float(requiredPayloadLengthExtraBytes) - / defaults.networkDefaultPayloadLengthExtraBytes, - ) - powStartTime = time.time() - initialHash = hashlib.sha512(encryptedPayload).digest() - trialValue, nonce = proofofwork.run(target, initialHash) - logger.info( - '(For msg message via API) Found proof of work %s\nNonce: %s\n' - 'POW took %s seconds. 
%s nonce trials per second.', - trialValue, nonce, int(time.time() - powStartTime), - nonce / (time.time() - powStartTime) - ) - encryptedPayload = pack('>Q', nonce) + encryptedPayload - toStreamNumber = decodeVarint(encryptedPayload[16:26])[0] - inventoryHash = calculateInventoryHash(encryptedPayload) - objectType = 2 - TTL = 2.5 * 24 * 60 * 60 - Inventory()[inventoryHash] = ( - objectType, toStreamNumber, encryptedPayload, - int(time.time()) + TTL, '' - ) - logger.info( - 'Broadcasting inv for msg(API disseminatePreEncryptedMsg' - ' command): %s', hexlify(inventoryHash)) - queues.invQueue.put((toStreamNumber, inventoryHash)) - - @command('trashSentMessageByAckData') - def HandleTrashSentMessageByAckDAta(self, ackdata): - """Trash a sent message by ackdata (hex encoded)""" - # This API method should only be used when msgid is not available - ackdata = self._decode(ackdata, "hex") - sqlExecute("UPDATE sent SET folder='trash' WHERE ackdata=?", ackdata) - return 'Trashed sent message (assuming message existed).' - - @command('disseminatePubkey') - def HandleDissimatePubKey(self, payload): - """Handle a request to disseminate a public key""" - - # The device issuing this command to PyBitmessage supplies a pubkey - # object to be disseminated to the rest of the Bitmessage network. - # PyBitmessage accepts this pubkey object and sends it out to the rest - # of the Bitmessage network as if it had generated the pubkey object - # itself. Please do not yet add this to the api doc. 
- payload = self._decode(payload, "hex") - - # Let us do the POW - target = 2 ** 64 / (( - len(payload) + defaults.networkDefaultPayloadLengthExtraBytes + 8 - ) * defaults.networkDefaultProofOfWorkNonceTrialsPerByte) - logger.info('(For pubkey message via API) Doing proof of work...') - initialHash = hashlib.sha512(payload).digest() - trialValue, nonce = proofofwork.run(target, initialHash) - logger.info( - '(For pubkey message via API) Found proof of work %s Nonce: %s', - trialValue, nonce - ) - payload = pack('>Q', nonce) + payload - - pubkeyReadPosition = 8 # bypass the nonce - if payload[pubkeyReadPosition:pubkeyReadPosition + 4] == \ - '\x00\x00\x00\x00': # if this pubkey uses 8 byte time - pubkeyReadPosition += 8 - else: - pubkeyReadPosition += 4 - addressVersionLength = decodeVarint( - payload[pubkeyReadPosition:pubkeyReadPosition + 10])[1] - pubkeyReadPosition += addressVersionLength - pubkeyStreamNumber = decodeVarint( - payload[pubkeyReadPosition:pubkeyReadPosition + 10])[0] - inventoryHash = calculateInventoryHash(payload) - objectType = 1 # .. todo::: support v4 pubkeys - TTL = 28 * 24 * 60 * 60 - Inventory()[inventoryHash] = ( - objectType, pubkeyStreamNumber, payload, int(time.time()) + TTL, '' - ) - logger.info( - 'broadcasting inv within API command disseminatePubkey with' - ' hash: %s', hexlify(inventoryHash)) - queues.invQueue.put((pubkeyStreamNumber, inventoryHash)) - - @command( - 'getMessageDataByDestinationHash', 'getMessageDataByDestinationTag') - def HandleGetMessageDataByDestinationHash(self, requestedHash): - """Handle a request to get message data by destination hash""" - - # Method will eventually be used by a particular Android app to - # select relevant messages. Do not yet add this to the api - # doc. 
- if len(requestedHash) != 32: - raise APIError( - 19, 'The length of hash should be 32 bytes (encoded in hex' - ' thus 64 characters).') - requestedHash = self._decode(requestedHash, "hex") - - # This is not a particularly commonly used API function. Before we - # use it we'll need to fill out a field in our inventory database - # which is blank by default (first20bytesofencryptedmessage). - queryreturn = sqlQuery( - "SELECT hash, payload FROM inventory WHERE tag = ''" - " and objecttype = 2") - with SqlBulkExecute() as sql: - for hash01, payload in queryreturn: - readPosition = 16 # Nonce length + time length - # Stream Number length - readPosition += decodeVarint( - payload[readPosition:readPosition + 10])[1] - t = (payload[readPosition:readPosition + 32], hash01) - sql.execute("UPDATE inventory SET tag=? WHERE hash=?", *t) - - queryreturn = sqlQuery( - "SELECT payload FROM inventory WHERE tag = ?", requestedHash) - return {"receivedMessageDatas": [ - {'data': hexlify(payload)} for payload, in queryreturn - ]} - - @command('clientStatus') - def HandleClientStatus(self): - """ - Returns the bitmessage status as dict with keys *networkConnections*, - *numberOfMessagesProcessed*, *numberOfBroadcastsProcessed*, - *numberOfPubkeysProcessed*, *pendingDownload*, *networkStatus*, - *softwareName*, *softwareVersion*. *networkStatus* will be one of - these strings: "notConnected", - "connectedButHaveNotReceivedIncomingConnections", - or "connectedAndReceivingIncomingConnections". 
- """ - - connections_num = len(network.stats.connectedHostsList()) - if connections_num == 0: - networkStatus = 'notConnected' - elif state.clientHasReceivedIncomingConnections: - networkStatus = 'connectedAndReceivingIncomingConnections' - else: - networkStatus = 'connectedButHaveNotReceivedIncomingConnections' - return { - 'networkConnections': connections_num, - 'numberOfMessagesProcessed': state.numberOfMessagesProcessed, - 'numberOfBroadcastsProcessed': state.numberOfBroadcastsProcessed, - 'numberOfPubkeysProcessed': state.numberOfPubkeysProcessed, - 'pendingDownload': network.stats.pendingDownload(), - 'networkStatus': networkStatus, - 'softwareName': 'PyBitmessage', - 'softwareVersion': softwareVersion - } - - @command('helloWorld') - def HandleHelloWorld(self, a, b): - """Test two string params""" - return a + '-' + b - - @command('add') - def HandleAdd(self, a, b): - """Test two numeric params""" - return a + b - - @testmode('clearUISignalQueue') - def HandleclearUISignalQueue(self): - """clear UISignalQueue""" - queues.UISignalQueue.queue.clear() - return "success" - - @command('statusBar') - def HandleStatusBar(self, message): - """Update GUI statusbar message""" - queues.UISignalQueue.put(('updateStatusBar', message)) - - @testmode('getStatusBar') - def HandleGetStatusBar(self): - """Get GUI statusbar message""" - try: - _, data = queues.UISignalQueue.get(block=False) - except queue.Empty: - return None - return data - - @testmode('undeleteMessage') - def HandleUndeleteMessage(self, msgid): - """Undelete message""" - msgid = self._decode(msgid, "hex") - helper_inbox.undeleteMessage(msgid) - return "Undeleted message" - - @command('deleteAndVacuum') - def HandleDeleteAndVacuum(self): - """Cleanup trashes and vacuum messages database""" - sqlStoredProcedure('deleteandvacuume') - return 'done' - - @command('shutdown') - def HandleShutdown(self): - """Shutdown the bitmessage. 
Returns 'done'.""" - # backward compatible trick because False == 0 is True - state.shutdown = False - return 'done' - - def _handle_request(self, method, params): - try: - # pylint: disable=attribute-defined-outside-init - self._method = method - func = self._handlers[method] - return func(self, *params) - except KeyError: - raise APIError(20, 'Invalid method: %s' % method) - except TypeError as e: - msg = 'Unexpected API Failure - %s' % e - if 'argument' not in str(e): - raise APIError(21, msg) - argcount = len(params) - maxcount = func.func_code.co_argcount - if argcount > maxcount: - msg = ( - 'Command %s takes at most %s parameters (%s given)' - % (method, maxcount, argcount)) - else: - mincount = maxcount - len(func.func_defaults or []) - if argcount < mincount: - msg = ( - 'Command %s takes at least %s parameters (%s given)' - % (method, mincount, argcount)) - raise APIError(0, msg) - finally: - state.last_api_response = time.time() - - def _dispatch(self, method, params): - _fault = None - - try: - return self._handle_request(method, params) - except APIError as e: - _fault = e - except varintDecodeError as e: - logger.error(e) - _fault = APIError( - 26, 'Data contains a malformed varint. 
Some details: %s' % e) - except Exception as e: - logger.exception(e) - _fault = APIError(21, 'Unexpected API Failure - %s' % e) - - if _fault: - if self.config.safeGet( - 'bitmessagesettings', 'apivariant') == 'legacy': - return str(_fault) - else: - raise _fault # pylint: disable=raising-bad-type - - def _listMethods(self): - """List all API commands""" - return self._handlers.keys() - - def _methodHelp(self, method): - return self._handlers[method].__doc__ diff --git a/src/tests/mock/pybitmessage/bitmessagecli.py b/src/tests/mock/pybitmessage/bitmessagecli.py deleted file mode 100644 index adcab8b1..00000000 --- a/src/tests/mock/pybitmessage/bitmessagecli.py +++ /dev/null @@ -1,1887 +0,0 @@ -#!/usr/bin/python2.7 -# -*- coding: utf-8 -*- -# pylint: disable=too-many-lines,global-statement,too-many-branches,too-many-statements,inconsistent-return-statements -# pylint: disable=too-many-nested-blocks,too-many-locals,protected-access,too-many-arguments,too-many-function-args -# pylint: disable=no-member -""" -Created by Adam Melton (.dok) referenceing https://bitmessage.org/wiki/API_Reference for API documentation -Distributed under the MIT/X11 software license. See http://www.opensource.org/licenses/mit-license.php. - -This is an example of a daemon client for PyBitmessage 0.6.2, by .dok (Version 0.3.1) , modified - -TODO: fix the following (currently ignored) violations: - -""" - -import datetime -import imghdr -import json -import ntpath -import os -import socket -import sys -import time -import xmlrpclib - -from bmconfigparser import BMConfigParser - - -api = '' -keysName = 'keys.dat' -keysPath = 'keys.dat' -usrPrompt = 0 # 0 = First Start, 1 = prompt, 2 = no prompt if the program is starting up -knownAddresses = dict() - - -def userInput(message): - """Checks input for exit or quit. 
Also formats for input, etc""" - - global usrPrompt - - print('\n' + message) - uInput = raw_input('> ') - - if uInput.lower() == 'exit': # Returns the user to the main menu - usrPrompt = 1 - main() - - elif uInput.lower() == 'quit': # Quits the program - print('\n Bye\n') - sys.exit(0) - - else: - return uInput - - -def restartBmNotify(): - """Prompt the user to restart Bitmessage""" - print('\n *******************************************************************') - print(' WARNING: If Bitmessage is running locally, you must restart it now.') - print(' *******************************************************************\n') - - -# Begin keys.dat interactions - - -def lookupAppdataFolder(): - """gets the appropriate folders for the .dat files depending on the OS. Taken from bitmessagemain.py""" - - APPNAME = "PyBitmessage" - if sys.platform == 'darwin': - if "HOME" in os.environ: - dataFolder = os.path.join(os.environ["HOME"], "Library/Application support/", APPNAME) + '/' - else: - print( - ' Could not find home folder, please report ' - 'this message and your OS X version to the Daemon Github.') - sys.exit(1) - - elif 'win32' in sys.platform or 'win64' in sys.platform: - dataFolder = os.path.join(os.environ['APPDATA'], APPNAME) + '\\' - else: - dataFolder = os.path.expanduser(os.path.join("~", ".config/" + APPNAME + "/")) - return dataFolder - - -def configInit(): - """Initialised the configuration""" - - BMConfigParser().add_section('bitmessagesettings') - # Sets the bitmessage port to stop the warning about the api not properly - # being setup. This is in the event that the keys.dat is in a different - # directory or is created locally to connect to a machine remotely. 
- BMConfigParser().set('bitmessagesettings', 'port', '8444') - BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') # Sets apienabled to true in keys.dat - - with open(keysName, 'wb') as configfile: - BMConfigParser().write(configfile) - - print('\n ' + str(keysName) + ' Initalized in the same directory as daemon.py') - print(' You will now need to configure the ' + str(keysName) + ' file.\n') - - -def apiInit(apiEnabled): - """Initialise the API""" - - global usrPrompt - BMConfigParser().read(keysPath) - - if apiEnabled is False: # API information there but the api is disabled. - uInput = userInput("The API is not enabled. Would you like to do that now, (Y)es or (N)o?").lower() - - if uInput == "y": - BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') # Sets apienabled to true in keys.dat - with open(keysPath, 'wb') as configfile: - BMConfigParser().write(configfile) - - print('Done') - restartBmNotify() - return True - - elif uInput == "n": - print(' \n************************************************************') - print(' Daemon will not work when the API is disabled. ') - print(' Please refer to the Bitmessage Wiki on how to setup the API.') - print(' ************************************************************\n') - usrPrompt = 1 - main() - - else: - print('\n Invalid Entry\n') - usrPrompt = 1 - main() - - elif apiEnabled: # API correctly setup - # Everything is as it should be - return True - - else: # API information was not present. - print('\n ' + str(keysPath) + ' not properly configured!\n') - uInput = userInput("Would you like to do this now, (Y)es or (N)o?").lower() - - if uInput == "y": # User said yes, initalize the api by writing these values to the keys.dat file - print(' ') - - apiUsr = userInput("API Username") - apiPwd = userInput("API Password") - apiPort = userInput("API Port") - apiEnabled = userInput("API Enabled? (True) or (False)").lower() - daemon = userInput("Daemon mode Enabled? 
(True) or (False)").lower() - - if (daemon != 'true' and daemon != 'false'): - print('\n Invalid Entry for Daemon.\n') - uInput = 1 - main() - - print(' -----------------------------------\n') - - # sets the bitmessage port to stop the warning about the api not properly - # being setup. This is in the event that the keys.dat is in a different - # directory or is created locally to connect to a machine remotely. - BMConfigParser().set('bitmessagesettings', 'port', '8444') - BMConfigParser().set('bitmessagesettings', 'apienabled', 'true') - BMConfigParser().set('bitmessagesettings', 'apiport', apiPort) - BMConfigParser().set('bitmessagesettings', 'apiinterface', '127.0.0.1') - BMConfigParser().set('bitmessagesettings', 'apiusername', apiUsr) - BMConfigParser().set('bitmessagesettings', 'apipassword', apiPwd) - BMConfigParser().set('bitmessagesettings', 'daemon', daemon) - with open(keysPath, 'wb') as configfile: - BMConfigParser().write(configfile) - - print('\n Finished configuring the keys.dat file with API information.\n') - restartBmNotify() - return True - - elif uInput == "n": - print('\n ***********************************************************') - print(' Please refer to the Bitmessage Wiki on how to setup the API.') - print(' ***********************************************************\n') - usrPrompt = 1 - main() - else: - print(' \nInvalid entry\n') - usrPrompt = 1 - main() - - -def apiData(): - """TBC""" - - global keysName - global keysPath - global usrPrompt - - BMConfigParser().read(keysPath) # First try to load the config file (the keys.dat file) from the program directory - - try: - BMConfigParser().get('bitmessagesettings', 'port') - appDataFolder = '' - except: # noqa:E722 - # Could not load the keys.dat file in the program directory. Perhaps it is in the appdata directory. 
- appDataFolder = lookupAppdataFolder() - keysPath = appDataFolder + keysPath - BMConfigParser().read(keysPath) - - try: - BMConfigParser().get('bitmessagesettings', 'port') - except: # noqa:E722 - # keys.dat was not there either, something is wrong. - print('\n ******************************************************************') - print(' There was a problem trying to access the Bitmessage keys.dat file') - print(' or keys.dat is not set up correctly') - print(' Make sure that daemon is in the same directory as Bitmessage. ') - print(' ******************************************************************\n') - - uInput = userInput("Would you like to create a keys.dat in the local directory, (Y)es or (N)o?").lower() - - if uInput in ("y", "yes"): - configInit() - keysPath = keysName - usrPrompt = 0 - main() - elif uInput in ("n", "no"): - print('\n Trying Again.\n') - usrPrompt = 0 - main() - else: - print('\n Invalid Input.\n') - - usrPrompt = 1 - main() - - try: # checks to make sure that everyting is configured correctly. 
Excluding apiEnabled, it is checked after - BMConfigParser().get('bitmessagesettings', 'apiport') - BMConfigParser().get('bitmessagesettings', 'apiinterface') - BMConfigParser().get('bitmessagesettings', 'apiusername') - BMConfigParser().get('bitmessagesettings', 'apipassword') - - except: # noqa:E722 - apiInit("") # Initalize the keys.dat file with API information - - # keys.dat file was found or appropriately configured, allow information retrieval - # apiEnabled = - # apiInit(BMConfigParser().safeGetBoolean('bitmessagesettings','apienabled')) - # #if false it will prompt the user, if true it will return true - - BMConfigParser().read(keysPath) # read again since changes have been made - apiPort = int(BMConfigParser().get('bitmessagesettings', 'apiport')) - apiInterface = BMConfigParser().get('bitmessagesettings', 'apiinterface') - apiUsername = BMConfigParser().get('bitmessagesettings', 'apiusername') - apiPassword = BMConfigParser().get('bitmessagesettings', 'apipassword') - - print('\n API data successfully imported.\n') - - # Build the api credentials - return "http://" + apiUsername + ":" + apiPassword + "@" + apiInterface + ":" + str(apiPort) + "/" - - -# End keys.dat interactions - - -def apiTest(): - """Tests the API connection to bitmessage. 
Returns true if it is connected.""" - - try: - result = api.add(2, 3) - except: # noqa:E722 - return False - - return result == 5 - - -def bmSettings(): - """Allows the viewing and modification of keys.dat settings.""" - - global keysPath - global usrPrompt - - keysPath = 'keys.dat' - - BMConfigParser().read(keysPath) # Read the keys.dat - try: - port = BMConfigParser().get('bitmessagesettings', 'port') - except: # noqa:E722 - print('\n File not found.\n') - usrPrompt = 0 - main() - - startonlogon = BMConfigParser().safeGetBoolean('bitmessagesettings', 'startonlogon') - minimizetotray = BMConfigParser().safeGetBoolean('bitmessagesettings', 'minimizetotray') - showtraynotifications = BMConfigParser().safeGetBoolean('bitmessagesettings', 'showtraynotifications') - startintray = BMConfigParser().safeGetBoolean('bitmessagesettings', 'startintray') - defaultnoncetrialsperbyte = BMConfigParser().get('bitmessagesettings', 'defaultnoncetrialsperbyte') - defaultpayloadlengthextrabytes = BMConfigParser().get('bitmessagesettings', 'defaultpayloadlengthextrabytes') - daemon = BMConfigParser().safeGetBoolean('bitmessagesettings', 'daemon') - - socksproxytype = BMConfigParser().get('bitmessagesettings', 'socksproxytype') - sockshostname = BMConfigParser().get('bitmessagesettings', 'sockshostname') - socksport = BMConfigParser().get('bitmessagesettings', 'socksport') - socksauthentication = BMConfigParser().safeGetBoolean('bitmessagesettings', 'socksauthentication') - socksusername = BMConfigParser().get('bitmessagesettings', 'socksusername') - sockspassword = BMConfigParser().get('bitmessagesettings', 'sockspassword') - - print('\n -----------------------------------') - print(' | Current Bitmessage Settings |') - print(' -----------------------------------') - print(' port = ' + port) - print(' startonlogon = ' + str(startonlogon)) - print(' minimizetotray = ' + str(minimizetotray)) - print(' showtraynotifications = ' + str(showtraynotifications)) - print(' startintray = ' + 
str(startintray)) - print(' defaultnoncetrialsperbyte = ' + defaultnoncetrialsperbyte) - print(' defaultpayloadlengthextrabytes = ' + defaultpayloadlengthextrabytes) - print(' daemon = ' + str(daemon)) - print('\n ------------------------------------') - print(' | Current Connection Settings |') - print(' -----------------------------------') - print(' socksproxytype = ' + socksproxytype) - print(' sockshostname = ' + sockshostname) - print(' socksport = ' + socksport) - print(' socksauthentication = ' + str(socksauthentication)) - print(' socksusername = ' + socksusername) - print(' sockspassword = ' + sockspassword) - print(' ') - - uInput = userInput("Would you like to modify any of these settings, (Y)es or (N)o?").lower() - - if uInput == "y": - while True: # loops if they mistype the setting name, they can exit the loop with 'exit' - invalidInput = False - uInput = userInput("What setting would you like to modify?").lower() - print(' ') - - if uInput == "port": - print(' Current port number: ' + port) - uInput = userInput("Enter the new port number.") - BMConfigParser().set('bitmessagesettings', 'port', str(uInput)) - elif uInput == "startonlogon": - print(' Current status: ' + str(startonlogon)) - uInput = userInput("Enter the new status.") - BMConfigParser().set('bitmessagesettings', 'startonlogon', str(uInput)) - elif uInput == "minimizetotray": - print(' Current status: ' + str(minimizetotray)) - uInput = userInput("Enter the new status.") - BMConfigParser().set('bitmessagesettings', 'minimizetotray', str(uInput)) - elif uInput == "showtraynotifications": - print(' Current status: ' + str(showtraynotifications)) - uInput = userInput("Enter the new status.") - BMConfigParser().set('bitmessagesettings', 'showtraynotifications', str(uInput)) - elif uInput == "startintray": - print(' Current status: ' + str(startintray)) - uInput = userInput("Enter the new status.") - BMConfigParser().set('bitmessagesettings', 'startintray', str(uInput)) - elif uInput == 
"defaultnoncetrialsperbyte": - print(' Current default nonce trials per byte: ' + defaultnoncetrialsperbyte) - uInput = userInput("Enter the new defaultnoncetrialsperbyte.") - BMConfigParser().set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(uInput)) - elif uInput == "defaultpayloadlengthextrabytes": - print(' Current default payload length extra bytes: ' + defaultpayloadlengthextrabytes) - uInput = userInput("Enter the new defaultpayloadlengthextrabytes.") - BMConfigParser().set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(uInput)) - elif uInput == "daemon": - print(' Current status: ' + str(daemon)) - uInput = userInput("Enter the new status.").lower() - BMConfigParser().set('bitmessagesettings', 'daemon', str(uInput)) - elif uInput == "socksproxytype": - print(' Current socks proxy type: ' + socksproxytype) - print("Possibilities: 'none', 'SOCKS4a', 'SOCKS5'.") - uInput = userInput("Enter the new socksproxytype.") - BMConfigParser().set('bitmessagesettings', 'socksproxytype', str(uInput)) - elif uInput == "sockshostname": - print(' Current socks host name: ' + sockshostname) - uInput = userInput("Enter the new sockshostname.") - BMConfigParser().set('bitmessagesettings', 'sockshostname', str(uInput)) - elif uInput == "socksport": - print(' Current socks port number: ' + socksport) - uInput = userInput("Enter the new socksport.") - BMConfigParser().set('bitmessagesettings', 'socksport', str(uInput)) - elif uInput == "socksauthentication": - print(' Current status: ' + str(socksauthentication)) - uInput = userInput("Enter the new status.") - BMConfigParser().set('bitmessagesettings', 'socksauthentication', str(uInput)) - elif uInput == "socksusername": - print(' Current socks username: ' + socksusername) - uInput = userInput("Enter the new socksusername.") - BMConfigParser().set('bitmessagesettings', 'socksusername', str(uInput)) - elif uInput == "sockspassword": - print(' Current socks password: ' + sockspassword) - uInput = 
userInput("Enter the new password.") - BMConfigParser().set('bitmessagesettings', 'sockspassword', str(uInput)) - else: - print("\n Invalid input. Please try again.\n") - invalidInput = True - - if invalidInput is not True: # don't prompt if they made a mistake. - uInput = userInput("Would you like to change another setting, (Y)es or (N)o?").lower() - - if uInput != "y": - print('\n Changes Made.\n') - with open(keysPath, 'wb') as configfile: - BMConfigParser().write(configfile) - restartBmNotify() - break - - elif uInput == "n": - usrPrompt = 1 - main() - else: - print("Invalid input.") - usrPrompt = 1 - main() - - -def validAddress(address): - """Predicate to test address validity""" - address_information = json.loads(api.decodeAddress(address)) - - return 'success' in str(address_information['status']).lower() - - -def getAddress(passphrase, vNumber, sNumber): - """Get a deterministic address""" - passphrase = passphrase.encode('base64') # passphrase must be encoded - - return api.getDeterministicAddress(passphrase, vNumber, sNumber) - - -def subscribe(): - """Subscribe to an address""" - global usrPrompt - - while True: - address = userInput("What address would you like to subscribe to?") - - if address == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(address) is False: - print('\n Invalid. "c" to cancel. Please try again.\n') - else: - break - - label = userInput("Enter a label for this address.") - label = label.encode('base64') - - api.addSubscription(address, label) - print('\n You are now subscribed to: ' + address + '\n') - - -def unsubscribe(): - """Unsusbcribe from an address""" - global usrPrompt - - while True: - address = userInput("What address would you like to unsubscribe from?") - - if address == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(address) is False: - print('\n Invalid. "c" to cancel. 
Please try again.\n') - else: - break - - userInput("Are you sure, (Y)es or (N)o?").lower() # uInput = - - api.deleteSubscription(address) - print('\n You are now unsubscribed from: ' + address + '\n') - - -def listSubscriptions(): - """List subscriptions""" - - global usrPrompt - print('\nLabel, Address, Enabled\n') - try: - print(api.listSubscriptions()) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - print(' ') - - -def createChan(): - """Create a channel""" - - global usrPrompt - password = userInput("Enter channel name") - password = password.encode('base64') - try: - print(api.createChan(password)) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def joinChan(): - """Join a channel""" - - global usrPrompt - while True: - address = userInput("Enter channel address") - - if address == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(address) is False: - print('\n Invalid. "c" to cancel. Please try again.\n') - else: - break - - password = userInput("Enter channel name") - password = password.encode('base64') - try: - print(api.joinChan(password, address)) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def leaveChan(): - """Leave a channel""" - - global usrPrompt - while True: - address = userInput("Enter channel address") - - if address == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(address) is False: - print('\n Invalid. "c" to cancel. 
Please try again.\n') - else: - break - - try: - print(api.leaveChan(address)) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def listAdd(): - """List all of the addresses and their info""" - global usrPrompt - try: - jsonAddresses = json.loads(api.listAddresses()) - numAddresses = len(jsonAddresses['addresses']) # Number of addresses - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - # print('\nAddress Number,Label,Address,Stream,Enabled\n') - print('\n --------------------------------------------------------------------------') - print(' | # | Label | Address |S#|Enabled|') - print(' |---|-------------------|-------------------------------------|--|-------|') - for addNum in range(0, numAddresses): # processes all of the addresses and lists them out - label = (jsonAddresses['addresses'][addNum]['label']).encode( - 'utf') # may still misdiplay in some consoles - address = str(jsonAddresses['addresses'][addNum]['address']) - stream = str(jsonAddresses['addresses'][addNum]['stream']) - enabled = str(jsonAddresses['addresses'][addNum]['enabled']) - - if len(label) > 19: - label = label[:16] + '...' - - print(''.join([ - ' |', - str(addNum).ljust(3), - '|', - label.ljust(19), - '|', - address.ljust(37), - '|', - stream.ljust(1), - '|', - enabled.ljust(7), - '|', - ])) - - print(''.join([ - ' ', - 74 * '-', - '\n', - ])) - - -def genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe): - """Generate address""" - - global usrPrompt - - if deterministic is False: # Generates a new address with the user defined label. non-deterministic - addressLabel = lbl.encode('base64') - try: - generatedAddress = api.createRandomAddress(addressLabel) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - return generatedAddress - - elif deterministic: # Generates a new deterministic address with the user inputs. 
- passphrase = passphrase.encode('base64') - try: - generatedAddress = api.createDeterministicAddresses(passphrase, numOfAdd, addVNum, streamNum, ripe) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - return generatedAddress - - return 'Entry Error' - - -def saveFile(fileName, fileData): - """Allows attachments and messages/broadcats to be saved""" - - # This section finds all invalid characters and replaces them with ~ - fileName = fileName.replace(" ", "") - fileName = fileName.replace("/", "~") - # fileName = fileName.replace("\\", "~") How do I get this to work...? - fileName = fileName.replace(":", "~") - fileName = fileName.replace("*", "~") - fileName = fileName.replace("?", "~") - fileName = fileName.replace('"', "~") - fileName = fileName.replace("<", "~") - fileName = fileName.replace(">", "~") - fileName = fileName.replace("|", "~") - - directory = os.path.abspath('attachments') - - if not os.path.exists(directory): - os.makedirs(directory) - - filePath = os.path.join(directory, fileName) - - with open(filePath, 'wb+') as path_to_file: - path_to_file.write(fileData.decode("base64")) - print('\n Successfully saved ' + filePath + '\n') - - -def attachment(): - """Allows users to attach a file to their message or broadcast""" - - theAttachmentS = '' - - while True: - - isImage = False - theAttachment = '' - - while True: # loops until valid path is entered - filePath = userInput( - '\nPlease enter the path to the attachment or just the attachment name if in this folder.') - - try: - with open(filePath): - break - except IOError: - print('\n %s was not found on your filesystem or can not be opened.\n' % filePath) - - # print(filesize, and encoding estimate with confirmation if file is over X size(1mb?)) - invSize = os.path.getsize(filePath) - invSize = (invSize / 1024) # Converts to kilobytes - round(invSize, 2) # Rounds to two decimal places - - if invSize > 500.0: # If over 500KB - print(''.join([ - '\n WARNING:The file 
that you are trying to attach is ', - invSize, - 'KB and will take considerable time to send.\n' - ])) - uInput = userInput('Are you sure you still want to attach it, (Y)es or (N)o?').lower() - - if uInput != "y": - print('\n Attachment discarded.\n') - return '' - elif invSize > 184320.0: # If larger than 180MB, discard. - print('\n Attachment too big, maximum allowed size:180MB\n') - main() - - pathLen = len(str(ntpath.basename(filePath))) # Gets the length of the filepath excluding the filename - fileName = filePath[(len(str(filePath)) - pathLen):] # reads the filename - - filetype = imghdr.what(filePath) # Tests if it is an image file - if filetype is not None: - print('\n ---------------------------------------------------') - print(' Attachment detected as an Image.') - print(' tags will automatically be included,') - print(' allowing the recipient to view the image') - print(' using the "View HTML code..." option in Bitmessage.') - print(' ---------------------------------------------------\n') - isImage = True - time.sleep(2) - - # Alert the user that the encoding process may take some time. - print('\n Encoding Attachment, Please Wait ...\n') - - with open(filePath, 'rb') as f: # Begin the actual encoding - data = f.read(188743680) # Reads files up to 180MB, the maximum size for Bitmessage. - data = data.encode("base64") - - if isImage: # If it is an image, include image tags in the message - theAttachment = """ - - - -Filename:%s -Filesize:%sKB -Encoding:base64 - -
-
- %s -
-
""" % (fileName, invSize, fileName, filetype, data) - else: # Else it is not an image so do not include the embedded image code. - theAttachment = """ - - - -Filename:%s -Filesize:%sKB -Encoding:base64 - -""" % (fileName, invSize, fileName, fileName, data) - - uInput = userInput('Would you like to add another attachment, (Y)es or (N)o?').lower() - - if uInput in ('y', 'yes'): # Allows multiple attachments to be added to one message - theAttachmentS = str(theAttachmentS) + str(theAttachment) + '\n\n' - elif uInput in ('n', 'no'): - break - - theAttachmentS = theAttachmentS + theAttachment - return theAttachmentS - - -def sendMsg(toAddress, fromAddress, subject, message): - """ - With no arguments sent, sendMsg fills in the blanks. - subject and message must be encoded before they are passed. - """ - - global usrPrompt - if validAddress(toAddress) is False: - while True: - toAddress = userInput("What is the To Address?") - - if toAddress == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(toAddress) is False: - print('\n Invalid Address. "c" to cancel. 
Please try again.\n') - else: - break - - if validAddress(fromAddress) is False: - try: - jsonAddresses = json.loads(api.listAddresses()) - numAddresses = len(jsonAddresses['addresses']) # Number of addresses - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - if numAddresses > 1: # Ask what address to send from if multiple addresses - found = False - while True: - print(' ') - fromAddress = userInput("Enter an Address or Address Label to send from.") - - if fromAddress == "exit": - usrPrompt = 1 - main() - - for addNum in range(0, numAddresses): # processes all of the addresses - label = jsonAddresses['addresses'][addNum]['label'] - address = jsonAddresses['addresses'][addNum]['address'] - if fromAddress == label: # address entered was a label and is found - fromAddress = address - found = True - break - - if found is False: - if validAddress(fromAddress) is False: - print('\n Invalid Address. Please try again.\n') - - else: - for addNum in range(0, numAddresses): # processes all of the addresses - address = jsonAddresses['addresses'][addNum]['address'] - if fromAddress == address: # address entered was a found in our addressbook. - found = True - break - - if found is False: - print('\n The address entered is not one of yours. 
Please try again.\n') - - if found: - break # Address was found - - else: # Only one address in address book - print('\n Using the only address in the addressbook to send from.\n') - fromAddress = jsonAddresses['addresses'][0]['address'] - - if not subject: - subject = userInput("Enter your Subject.") - subject = subject.encode('base64') - if not message: - message = userInput("Enter your Message.") - - uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() - if uInput == "y": - message = message + '\n\n' + attachment() - - message = message.encode('base64') - - try: - ackData = api.sendMessage(toAddress, fromAddress, subject, message) - print('\n Message Status:', api.getStatus(ackData), '\n') - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def sendBrd(fromAddress, subject, message): - """Send a broadcast""" - - global usrPrompt - if not fromAddress: - - try: - jsonAddresses = json.loads(api.listAddresses()) - numAddresses = len(jsonAddresses['addresses']) # Number of addresses - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - if numAddresses > 1: # Ask what address to send from if multiple addresses - found = False - while True: - fromAddress = userInput("\nEnter an Address or Address Label to send from.") - - if fromAddress == "exit": - usrPrompt = 1 - main() - - for addNum in range(0, numAddresses): # processes all of the addresses - label = jsonAddresses['addresses'][addNum]['label'] - address = jsonAddresses['addresses'][addNum]['address'] - if fromAddress == label: # address entered was a label and is found - fromAddress = address - found = True - break - - if found is False: - if validAddress(fromAddress) is False: - print('\n Invalid Address. 
Please try again.\n') - - else: - for addNum in range(0, numAddresses): # processes all of the addresses - address = jsonAddresses['addresses'][addNum]['address'] - if fromAddress == address: # address entered was a found in our addressbook. - found = True - break - - if found is False: - print('\n The address entered is not one of yours. Please try again.\n') - - if found: - break # Address was found - - else: # Only one address in address book - print('\n Using the only address in the addressbook to send from.\n') - fromAddress = jsonAddresses['addresses'][0]['address'] - - if not subject: - subject = userInput("Enter your Subject.") - subject = subject.encode('base64') - if not message: - message = userInput("Enter your Message.") - - uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() - if uInput == "y": - message = message + '\n\n' + attachment() - - message = message.encode('base64') - - try: - ackData = api.sendBroadcast(fromAddress, subject, message) - print('\n Message Status:', api.getStatus(ackData), '\n') - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def inbox(unreadOnly=False): - """Lists the messages by: Message Number, To Address Label, From Address Label, Subject, Received Time)""" - - global usrPrompt - try: - inboxMessages = json.loads(api.getAllInboxMessages()) - numMessages = len(inboxMessages['inboxMessages']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - messagesPrinted = 0 - messagesUnread = 0 - for msgNum in range(0, numMessages): # processes all of the messages in the inbox - message = inboxMessages['inboxMessages'][msgNum] - # if we are displaying all messages or if this message is unread then display it - if not unreadOnly or not message['read']: - print(' -----------------------------------\n') - print(' Message Number:', msgNum) # Message Number) - print(' To:', getLabelForAddress(message['toAddress'])) # Get the to address) - 
print(' From:', getLabelForAddress(message['fromAddress'])) # Get the from address) - print(' Subject:', message['subject'].decode('base64')) # Get the subject) - print(''.join([ - ' Received:', - datetime.datetime.fromtimestamp( - float(message['receivedTime'])).strftime('%Y-%m-%d %H:%M:%S'), - ])) - messagesPrinted += 1 - if not message['read']: - messagesUnread += 1 - - if messagesPrinted % 20 == 0 and messagesPrinted != 0: - userInput('(Press Enter to continue or type (Exit) to return to the main menu.)').lower() # uInput = - - print('\n -----------------------------------') - print(' There are %d unread messages of %d messages in the inbox.' % (messagesUnread, numMessages)) - print(' -----------------------------------\n') - - -def outbox(): - """TBC""" - - global usrPrompt - try: - outboxMessages = json.loads(api.getAllSentMessages()) - numMessages = len(outboxMessages['sentMessages']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - for msgNum in range(0, numMessages): # processes all of the messages in the outbox - print('\n -----------------------------------\n') - print(' Message Number:', msgNum) # Message Number) - # print(' Message ID:', outboxMessages['sentMessages'][msgNum]['msgid']) - print(' To:', getLabelForAddress( - outboxMessages['sentMessages'][msgNum]['toAddress'] - )) # Get the to address) - # Get the from address - print(' From:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['fromAddress'])) - print(' Subject:', outboxMessages['sentMessages'][msgNum]['subject'].decode('base64')) # Get the subject) - print(' Status:', outboxMessages['sentMessages'][msgNum]['status']) # Get the subject) - - # print(''.join([ - # ' Last Action Time:', - # datetime.datetime.fromtimestamp( - # float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), - # ])) - print(''.join([ - ' Last Action Time:', - datetime.datetime.fromtimestamp( - 
float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), - ])) - - if msgNum % 20 == 0 and msgNum != 0: - userInput('(Press Enter to continue or type (Exit) to return to the main menu.)').lower() # uInput = - - print('\n -----------------------------------') - print(' There are ', numMessages, ' messages in the outbox.') - print(' -----------------------------------\n') - - -def readSentMsg(msgNum): - """Opens a sent message for reading""" - - global usrPrompt - try: - outboxMessages = json.loads(api.getAllSentMessages()) - numMessages = len(outboxMessages['sentMessages']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - print(' ') - - if msgNum >= numMessages: - print('\n Invalid Message Number.\n') - main() - - # Begin attachment detection - message = outboxMessages['sentMessages'][msgNum]['message'].decode('base64') - - while True: # Allows multiple messages to be downloaded/saved - if ';base64,' in message: # Found this text in the message, there is probably an attachment. - attPos = message.index(";base64,") # Finds the attachment position - attEndPos = message.index("' />") # Finds the end of the attachment - # attLen = attEndPos - attPos #Finds the length of the message - - if 'alt = "' in message: # We can get the filename too - fnPos = message.index('alt = "') # Finds position of the filename - fnEndPos = message.index('" src=') # Finds the end position - # fnLen = fnEndPos - fnPos #Finds the length of the filename - - fileName = message[fnPos + 7:fnEndPos] - else: - fnPos = attPos - fileName = 'Attachment' - - uInput = userInput( - '\n Attachment Detected. 
Would you like to save the attachment, (Y)es or (N)o?').lower() - if uInput in ("y", 'yes'): - - this_attachment = message[attPos + 9:attEndPos] - saveFile(fileName, this_attachment) - - message = message[:fnPos] + '~~' + message[(attEndPos + 4):] - - else: - break - - # End attachment Detection - - print('\n To:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['toAddress'])) # Get the to address) - # Get the from address - print(' From:', getLabelForAddress(outboxMessages['sentMessages'][msgNum]['fromAddress'])) - print(' Subject:', outboxMessages['sentMessages'][msgNum]['subject'].decode('base64')) # Get the subject) - print(' Status:', outboxMessages['sentMessages'][msgNum]['status']) # Get the subject) - print(''.join([ - ' Last Action Time:', - datetime.datetime.fromtimestamp( - float(outboxMessages['sentMessages'][msgNum]['lastActionTime'])).strftime('%Y-%m-%d %H:%M:%S'), - ])) - print(' Message:\n') - print(message) # inboxMessages['inboxMessages'][msgNum]['message'].decode('base64')) - print(' ') - - -def readMsg(msgNum): - """Open a message for reading""" - global usrPrompt - try: - inboxMessages = json.loads(api.getAllInboxMessages()) - numMessages = len(inboxMessages['inboxMessages']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - if msgNum >= numMessages: - print('\n Invalid Message Number.\n') - main() - - # Begin attachment detection - message = inboxMessages['inboxMessages'][msgNum]['message'].decode('base64') - - while True: # Allows multiple messages to be downloaded/saved - if ';base64,' in message: # Found this text in the message, there is probably an attachment. 
- attPos = message.index(";base64,") # Finds the attachment position - attEndPos = message.index("' />") # Finds the end of the attachment - # attLen = attEndPos - attPos #Finds the length of the message - - if 'alt = "' in message: # We can get the filename too - fnPos = message.index('alt = "') # Finds position of the filename - fnEndPos = message.index('" src=') # Finds the end position - # fnLen = fnEndPos - fnPos #Finds the length of the filename - - fileName = message[fnPos + 7:fnEndPos] - else: - fnPos = attPos - fileName = 'Attachment' - - uInput = userInput( - '\n Attachment Detected. Would you like to save the attachment, (Y)es or (N)o?').lower() - if uInput in ("y", 'yes'): - - this_attachment = message[attPos + 9:attEndPos] - saveFile(fileName, this_attachment) - - message = message[:fnPos] + '~~' + message[attEndPos + 4:] - - else: - break - - # End attachment Detection - print('\n To:', getLabelForAddress(inboxMessages['inboxMessages'][msgNum]['toAddress'])) # Get the to address) - # Get the from address - print(' From:', getLabelForAddress(inboxMessages['inboxMessages'][msgNum]['fromAddress'])) - print(' Subject:', inboxMessages['inboxMessages'][msgNum]['subject'].decode('base64')) # Get the subject) - print(''.join([ - ' Received:', datetime.datetime.fromtimestamp( - float(inboxMessages['inboxMessages'][msgNum]['receivedTime'])).strftime('%Y-%m-%d %H:%M:%S'), - ])) - print(' Message:\n') - print(message) # inboxMessages['inboxMessages'][msgNum]['message'].decode('base64')) - print(' ') - return inboxMessages['inboxMessages'][msgNum]['msgid'] - - -def replyMsg(msgNum, forwardORreply): - """Allows you to reply to the message you are currently on. 
Saves typing in the addresses and subject.""" - - global usrPrompt - forwardORreply = forwardORreply.lower() # makes it lowercase - try: - inboxMessages = json.loads(api.getAllInboxMessages()) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - fromAdd = inboxMessages['inboxMessages'][msgNum]['toAddress'] # Address it was sent To, now the From address - message = inboxMessages['inboxMessages'][msgNum]['message'].decode('base64') # Message that you are replying too. - - subject = inboxMessages['inboxMessages'][msgNum]['subject'] - subject = subject.decode('base64') - - if forwardORreply == 'reply': - toAdd = inboxMessages['inboxMessages'][msgNum]['fromAddress'] # Address it was From, now the To address - subject = "Re: " + subject - - elif forwardORreply == 'forward': - subject = "Fwd: " + subject - - while True: - toAdd = userInput("What is the To Address?") - - if toAdd == "c": - usrPrompt = 1 - print(' ') - main() - elif validAddress(toAdd) is False: - print('\n Invalid Address. "c" to cancel. Please try again.\n') - else: - break - else: - print('\n Invalid Selection. 
Reply or Forward only') - usrPrompt = 0 - main() - - subject = subject.encode('base64') - - newMessage = userInput("Enter your Message.") - - uInput = userInput('Would you like to add an attachment, (Y)es or (N)o?').lower() - if uInput == "y": - newMessage = newMessage + '\n\n' + attachment() - - newMessage = newMessage + '\n\n------------------------------------------------------\n' - newMessage = newMessage + message - newMessage = newMessage.encode('base64') - - sendMsg(toAdd, fromAdd, subject, newMessage) - - main() - - -def delMsg(msgNum): - """Deletes a specified message from the inbox""" - - global usrPrompt - try: - inboxMessages = json.loads(api.getAllInboxMessages()) - # gets the message ID via the message index number - msgId = inboxMessages['inboxMessages'][int(msgNum)]['msgid'] - - msgAck = api.trashMessage(msgId) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - return msgAck - - -def delSentMsg(msgNum): - """Deletes a specified message from the outbox""" - - global usrPrompt - try: - outboxMessages = json.loads(api.getAllSentMessages()) - # gets the message ID via the message index number - msgId = outboxMessages['sentMessages'][int(msgNum)]['msgid'] - msgAck = api.trashSentMessage(msgId) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - return msgAck - - -def getLabelForAddress(address): - """Get label for an address""" - - if address in knownAddresses: - return knownAddresses[address] - else: - buildKnownAddresses() - if address in knownAddresses: - return knownAddresses[address] - - return address - - -def buildKnownAddresses(): - """Build known addresses""" - - global usrPrompt - - # add from address book - try: - response = api.listAddressBookEntries() - # if api is too old then fail - if "API Error 0020" in response: - return - addressBook = json.loads(response) - for entry in addressBook['addresses']: - if entry['address'] not in knownAddresses: - 
knownAddresses[entry['address']] = "%s (%s)" % (entry['label'].decode('base64'), entry['address']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - # add from my addresses - try: - response = api.listAddresses2() - # if api is too old just return then fail - if "API Error 0020" in response: - return - addresses = json.loads(response) - for entry in addresses['addresses']: - if entry['address'] not in knownAddresses: - knownAddresses[entry['address']] = "%s (%s)" % (entry['label'].decode('base64'), entry['address']) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def listAddressBookEntries(): - """List addressbook entries""" - - global usrPrompt - - try: - response = api.listAddressBookEntries() - if "API Error" in response: - return getAPIErrorCode(response) - addressBook = json.loads(response) - print(' --------------------------------------------------------------') - print(' | Label | Address |') - print(' |--------------------|---------------------------------------|') - for entry in addressBook['addresses']: - label = entry['label'].decode('base64') - address = entry['address'] - if len(label) > 19: - label = label[:16] + '...' 
- print(' | ' + label.ljust(19) + '| ' + address.ljust(37) + ' |') - print(' --------------------------------------------------------------') - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def addAddressToAddressBook(address, label): - """Add an address to an addressbook""" - - global usrPrompt - - try: - response = api.addAddressBookEntry(address, label.encode('base64')) - if "API Error" in response: - return getAPIErrorCode(response) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def deleteAddressFromAddressBook(address): - """Delete an address from an addressbook""" - - global usrPrompt - - try: - response = api.deleteAddressBookEntry(address) - if "API Error" in response: - return getAPIErrorCode(response) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def getAPIErrorCode(response): - """Get API error code""" - - if "API Error" in response: - # if we got an API error return the number by getting the number - # after the second space and removing the trailing colon - return int(response.split()[2][:-1]) - - -def markMessageRead(messageID): - """Mark a message as read""" - - global usrPrompt - - try: - response = api.getInboxMessageByID(messageID, True) - if "API Error" in response: - return getAPIErrorCode(response) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def markMessageUnread(messageID): - """Mark a mesasge as unread""" - - global usrPrompt - - try: - response = api.getInboxMessageByID(messageID, False) - if "API Error" in response: - return getAPIErrorCode(response) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - -def markAllMessagesRead(): - """Mark all messages as read""" - - global usrPrompt - - try: - inboxMessages = json.loads(api.getAllInboxMessages())['inboxMessages'] - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - for message 
in inboxMessages: - if not message['read']: - markMessageRead(message['msgid']) - - -def markAllMessagesUnread(): - """Mark all messages as unread""" - - global usrPrompt - - try: - inboxMessages = json.loads(api.getAllInboxMessages())['inboxMessages'] - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - for message in inboxMessages: - if message['read']: - markMessageUnread(message['msgid']) - - -def clientStatus(): - """Print (the client status""" - - global usrPrompt - - try: - client_status = json.loads(api.clientStatus()) - except: # noqa:E722 - print('\n Connection Error\n') - usrPrompt = 0 - main() - - print("\nnetworkStatus: " + client_status['networkStatus'] + "\n") - print("\nnetworkConnections: " + str(client_status['networkConnections']) + "\n") - print("\nnumberOfPubkeysProcessed: " + str(client_status['numberOfPubkeysProcessed']) + "\n") - print("\nnumberOfMessagesProcessed: " + str(client_status['numberOfMessagesProcessed']) + "\n") - print("\nnumberOfBroadcastsProcessed: " + str(client_status['numberOfBroadcastsProcessed']) + "\n") - - -def shutdown(): - """Shutdown the API""" - - try: - api.shutdown() - except socket.error: - pass - print("\nShutdown command relayed\n") - - -def UI(usrInput): - """Main user menu""" - - global usrPrompt - - if usrInput in ("help", "h", "?"): - print(' ') - print(' -------------------------------------------------------------------------') - print(' | https://github.com/Dokument/PyBitmessage-Daemon |') - print(' |-----------------------------------------------------------------------|') - print(' | Command | Description |') - print(' |------------------------|----------------------------------------------|') - print(' | help | This help file. 
|') - print(' | apiTest | Tests the API |') - print(' | addInfo | Returns address information (If valid) |') - print(' | bmSettings | BitMessage settings |') - print(' | exit | Use anytime to return to main menu |') - print(' | quit | Quits the program |') - print(' |------------------------|----------------------------------------------|') - print(' | listAddresses | Lists all of the users addresses |') - print(' | generateAddress | Generates a new address |') - print(' | getAddress | Get determinist address from passphrase |') - print(' |------------------------|----------------------------------------------|') - print(' | listAddressBookEntries | Lists entries from the Address Book |') - print(' | addAddressBookEntry | Add address to the Address Book |') - print(' | deleteAddressBookEntry | Deletes address from the Address Book |') - print(' |------------------------|----------------------------------------------|') - print(' | subscribe | Subscribes to an address |') - print(' | unsubscribe | Unsubscribes from an address |') - print(' |------------------------|----------------------------------------------|') - print(' | create | Creates a channel |') - print(' | join | Joins a channel |') - print(' | leave | Leaves a channel |') - print(' |------------------------|----------------------------------------------|') - print(' | inbox | Lists the message information for the inbox |') - print(' | outbox | Lists the message information for the outbox |') - print(' | send | Send a new message or broadcast |') - print(' | unread | Lists all unread inbox messages |') - print(' | read | Reads a message from the inbox or outbox |') - print(' | save | Saves message to text file |') - print(' | delete | Deletes a message or all messages |') - print(' -------------------------------------------------------------------------') - print(' ') - main() - - elif usrInput == "apitest": # tests the API Connection. 
- if apiTest(): - print('\n API connection test has: PASSED\n') - else: - print('\n API connection test has: FAILED\n') - main() - - elif usrInput == "addinfo": - tmp_address = userInput('\nEnter the Bitmessage Address.') - address_information = json.loads(api.decodeAddress(tmp_address)) - - print('\n------------------------------') - - if 'success' in str(address_information['status']).lower(): - print(' Valid Address') - print(' Address Version: %s' % str(address_information['addressVersion'])) - print(' Stream Number: %s' % str(address_information['streamNumber'])) - else: - print(' Invalid Address !') - - print('------------------------------\n') - main() - - elif usrInput == "bmsettings": # tests the API Connection. - bmSettings() - print(' ') - main() - - elif usrInput == "quit": # Quits the application - print('\n Bye\n') - sys.exit(0) - - elif usrInput == "listaddresses": # Lists all of the identities in the addressbook - listAdd() - main() - - elif usrInput == "generateaddress": # Generates a new address - uInput = userInput('\nWould you like to create a (D)eterministic or (R)andom address?').lower() - - if uInput in ("d", "deterministic"): # Creates a deterministic address - deterministic = True - - lbl = '' - passphrase = userInput('Enter the Passphrase.') # .encode('base64') - numOfAdd = int(userInput('How many addresses would you like to generate?')) - addVNum = 3 - streamNum = 1 - isRipe = userInput('Shorten the address, (Y)es or (N)o?').lower() - - if isRipe == "y": - ripe = True - print(genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe)) - main() - elif isRipe == "n": - ripe = False - print(genAdd(lbl, deterministic, passphrase, numOfAdd, addVNum, streamNum, ripe)) - main() - elif isRipe == "exit": - usrPrompt = 1 - main() - else: - print('\n Invalid input\n') - main() - - elif uInput == "r" or uInput == "random": # Creates a random address with user-defined label - deterministic = False - null = '' - lbl = userInput('Enter 
the label for the new address.') - - print(genAdd(lbl, deterministic, null, null, null, null, null)) - main() - - else: - print('\n Invalid input\n') - main() - - elif usrInput == "getaddress": # Gets the address for/from a passphrase - phrase = userInput("Enter the address passphrase.") - print('\n Working...\n') - address = getAddress(phrase, 4, 1) # ,vNumber,sNumber) - print('\n Address: ' + address + '\n') - usrPrompt = 1 - main() - - elif usrInput == "subscribe": # Subsribe to an address - subscribe() - usrPrompt = 1 - main() - - elif usrInput == "unsubscribe": # Unsubscribe from an address - unsubscribe() - usrPrompt = 1 - main() - - elif usrInput == "listsubscriptions": # Unsubscribe from an address - listSubscriptions() - usrPrompt = 1 - main() - - elif usrInput == "create": - createChan() - usrPrompt = 1 - main() - - elif usrInput == "join": - joinChan() - usrPrompt = 1 - main() - - elif usrInput == "leave": - leaveChan() - usrPrompt = 1 - main() - - elif usrInput == "inbox": - print('\n Loading...\n') - inbox() - main() - - elif usrInput == "unread": - print('\n Loading...\n') - inbox(True) - main() - - elif usrInput == "outbox": - print('\n Loading...\n') - outbox() - main() - - elif usrInput == 'send': # Sends a message or broadcast - uInput = userInput('Would you like to send a (M)essage or (B)roadcast?').lower() - - if uInput in ('m', 'message'): - null = '' - sendMsg(null, null, null, null) - main() - elif uInput in ('b', 'broadcast'): - null = '' - sendBrd(null, null, null) - main() - - elif usrInput == "read": # Opens a message from the inbox for viewing. 
- - uInput = userInput("Would you like to read a message from the (I)nbox or (O)utbox?").lower() - - if uInput not in ('i', 'inbox', 'o', 'outbox'): - print('\n Invalid Input.\n') - usrPrompt = 1 - main() - - msgNum = int(userInput("What is the number of the message you wish to open?")) - - if uInput in ('i', 'inbox'): - print('\n Loading...\n') - messageID = readMsg(msgNum) - - uInput = userInput("\nWould you like to keep this message unread, (Y)es or (N)o?").lower() - - if uInput not in ('y', 'yes'): - markMessageRead(messageID) - usrPrompt = 1 - - uInput = userInput("\nWould you like to (D)elete, (F)orward, (R)eply to, or (Exit) this message?").lower() - - if uInput in ('r', 'reply'): - print('\n Loading...\n') - print(' ') - replyMsg(msgNum, 'reply') - usrPrompt = 1 - - elif uInput in ('f', 'forward'): - print('\n Loading...\n') - print(' ') - replyMsg(msgNum, 'forward') - usrPrompt = 1 - - elif uInput in ("d", 'delete'): - uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion - - if uInput == "y": - delMsg(msgNum) - print('\n Message Deleted.\n') - usrPrompt = 1 - else: - usrPrompt = 1 - else: - print('\n Invalid entry\n') - usrPrompt = 1 - - elif uInput in ('o', 'outbox'): - readSentMsg(msgNum) - - # Gives the user the option to delete the message - uInput = userInput("Would you like to (D)elete, or (Exit) this message?").lower() - - if uInput in ("d", 'delete'): - uInput = userInput('Are you sure, (Y)es or (N)o?').lower() # Prevent accidental deletion - - if uInput == "y": - delSentMsg(msgNum) - print('\n Message Deleted.\n') - usrPrompt = 1 - else: - usrPrompt = 1 - else: - print('\n Invalid Entry\n') - usrPrompt = 1 - - main() - - elif usrInput == "save": - - uInput = userInput("Would you like to save a message from the (I)nbox or (O)utbox?").lower() - - if uInput not in ('i', 'inbox', 'o', 'outbox'): - print('\n Invalid Input.\n') - usrPrompt = 1 - main() - - if uInput in ('i', 'inbox'): - inboxMessages = 
json.loads(api.getAllInboxMessages()) - numMessages = len(inboxMessages['inboxMessages']) - - while True: - msgNum = int(userInput("What is the number of the message you wish to save?")) - - if msgNum >= numMessages: - print('\n Invalid Message Number.\n') - else: - break - - subject = inboxMessages['inboxMessages'][msgNum]['subject'].decode('base64') - # Don't decode since it is done in the saveFile function - message = inboxMessages['inboxMessages'][msgNum]['message'] - - elif uInput == 'o' or uInput == 'outbox': - outboxMessages = json.loads(api.getAllSentMessages()) - numMessages = len(outboxMessages['sentMessages']) - - while True: - msgNum = int(userInput("What is the number of the message you wish to save?")) - - if msgNum >= numMessages: - print('\n Invalid Message Number.\n') - else: - break - - subject = outboxMessages['sentMessages'][msgNum]['subject'].decode('base64') - # Don't decode since it is done in the saveFile function - message = outboxMessages['sentMessages'][msgNum]['message'] - - subject = subject + '.txt' - saveFile(subject, message) - - usrPrompt = 1 - main() - - elif usrInput == "delete": # will delete a message from the system, not reflected on the UI. 
- - uInput = userInput("Would you like to delete a message from the (I)nbox or (O)utbox?").lower() - - if uInput in ('i', 'inbox'): - inboxMessages = json.loads(api.getAllInboxMessages()) - numMessages = len(inboxMessages['inboxMessages']) - - while True: - msgNum = userInput( - 'Enter the number of the message you wish to delete or (A)ll to empty the inbox.').lower() - - if msgNum == 'a' or msgNum == 'all': - break - elif int(msgNum) >= numMessages: - print('\n Invalid Message Number.\n') - else: - break - - uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion - - if uInput == "y": - if msgNum in ('a', 'all'): - print(' ') - for msgNum in range(0, numMessages): # processes all of the messages in the inbox - print(' Deleting message ', msgNum + 1, ' of ', numMessages) - delMsg(0) - - print('\n Inbox is empty.') - usrPrompt = 1 - else: - delMsg(int(msgNum)) - - print('\n Notice: Message numbers may have changed.\n') - main() - else: - usrPrompt = 1 - - elif uInput in ('o', 'outbox'): - outboxMessages = json.loads(api.getAllSentMessages()) - numMessages = len(outboxMessages['sentMessages']) - - while True: - msgNum = userInput( - 'Enter the number of the message you wish to delete or (A)ll to empty the inbox.').lower() - - if msgNum in ('a', 'all'): - break - elif int(msgNum) >= numMessages: - print('\n Invalid Message Number.\n') - else: - break - - uInput = userInput("Are you sure, (Y)es or (N)o?").lower() # Prevent accidental deletion - - if uInput == "y": - if msgNum in ('a', 'all'): - print(' ') - for msgNum in range(0, numMessages): # processes all of the messages in the outbox - print(' Deleting message ', msgNum + 1, ' of ', numMessages) - delSentMsg(0) - - print('\n Outbox is empty.') - usrPrompt = 1 - else: - delSentMsg(int(msgNum)) - print('\n Notice: Message numbers may have changed.\n') - main() - else: - usrPrompt = 1 - else: - print('\n Invalid Entry.\n') - usrPrompt = 1 - main() - - elif usrInput == "exit": - 
print('\n You are already at the main menu. Use "quit" to quit.\n') - usrPrompt = 1 - main() - - elif usrInput == "listaddressbookentries": - res = listAddressBookEntries() - if res == 20: - print('\n Error: API function not supported.\n') - usrPrompt = 1 - main() - - elif usrInput == "addaddressbookentry": - address = userInput('Enter address') - label = userInput('Enter label') - res = addAddressToAddressBook(address, label) - if res == 16: - print('\n Error: Address already exists in Address Book.\n') - if res == 20: - print('\n Error: API function not supported.\n') - usrPrompt = 1 - main() - - elif usrInput == "deleteaddressbookentry": - address = userInput('Enter address') - res = deleteAddressFromAddressBook(address) - if res == 20: - print('\n Error: API function not supported.\n') - usrPrompt = 1 - main() - - elif usrInput == "markallmessagesread": - markAllMessagesRead() - usrPrompt = 1 - main() - - elif usrInput == "markallmessagesunread": - markAllMessagesUnread() - usrPrompt = 1 - main() - - elif usrInput == "status": - clientStatus() - usrPrompt = 1 - main() - - elif usrInput == "shutdown": - shutdown() - usrPrompt = 1 - main() - - else: - print('\n "', usrInput, '" is not a command.\n') - usrPrompt = 1 - main() - - -def main(): - """Entrypoint for the CLI app""" - - global api - global usrPrompt - - if usrPrompt == 0: - print('\n ------------------------------') - print(' | Bitmessage Daemon by .dok |') - print(' | Version 0.3.1 for BM 0.6.2 |') - print(' ------------------------------') - api = xmlrpclib.ServerProxy(apiData()) # Connect to BitMessage using these api credentials - - if apiTest() is False: - print('\n ****************************************************************') - print(' WARNING: You are not connected to the Bitmessage client.') - print(' Either Bitmessage is not running or your settings are incorrect.') - print(' Use the command "apiTest" or "bmSettings" to resolve this issue.') - print(' 
****************************************************************\n') - - print('Type (H)elp for a list of commands.') # Startup message) - usrPrompt = 2 - - elif usrPrompt == 1: - print('\nType (H)elp for a list of commands.') # Startup message) - usrPrompt = 2 - - try: - UI((raw_input('>').lower()).replace(" ", "")) - except EOFError: - UI("quit") - - -if __name__ == "__main__": - main() diff --git a/src/tests/mock/pybitmessage/bitmessagemain.py b/src/tests/mock/pybitmessage/bitmessagemain.py deleted file mode 100755 index 84313ab9..00000000 --- a/src/tests/mock/pybitmessage/bitmessagemain.py +++ /dev/null @@ -1,431 +0,0 @@ -#!/usr/bin/env python -""" -The PyBitmessage startup script -""" -# Copyright (c) 2012-2016 Jonathan Warren -# Copyright (c) 2012-2020 The Bitmessage developers -# Distributed under the MIT/X11 software license. See the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# Right now, PyBitmessage only support connecting to stream 1. It doesn't -# yet contain logic to expand into further streams. -import os -import sys - -try: - import pathmagic -except ImportError: - from pybitmessage import pathmagic -app_dir = pathmagic.setup() - -import depends -depends.check_dependencies() - -import getopt -import multiprocessing -# Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully. 
-import signal -import threading -import time -import traceback - -import defaults -import shared -import shutdown -import state - -from testmode_init import populate_api_test_data -from bmconfigparser import BMConfigParser -from debug import logger # this should go before any threads -from helper_startup import ( - adjustHalfOpenConnectionsLimit, fixSocket, start_proxyconfig) -from inventory import Inventory -# Network objects and threads -from network import ( - BMConnectionPool, Dandelion, AddrThread, AnnounceThread, BMNetworkThread, - InvThread, ReceiveQueueThread, DownloadThread, UploadThread -) -from network.knownnodes import readKnownNodes -from singleinstance import singleinstance -# Synchronous threads -from threads import ( - set_thread_name, printLock, - addressGenerator, objectProcessor, singleCleaner, singleWorker, sqlThread) - - -def signal_handler(signum, frame): - """Single handler for any signal sent to pybitmessage""" - process = multiprocessing.current_process() - thread = threading.current_thread() - logger.error( - 'Got signal %i in %s/%s', - signum, process.name, thread.name - ) - if process.name == "RegExParser": - # on Windows this isn't triggered, but it's fine, - # it has its own process termination thing - raise SystemExit - if "PoolWorker" in process.name: - raise SystemExit - if thread.name not in ("PyBitmessage", "MainThread"): - return - logger.error("Got signal %i", signum) - # there are possible non-UI variants to run bitmessage - # which should shutdown especially test-mode - if state.thisapp.daemon or not state.enableGUI: - shutdown.doCleanShutdown() - else: - print('# Thread: %s(%d)' % (thread.name, thread.ident)) - for filename, lineno, name, line in traceback.extract_stack(frame): - print('File: "%s", line %d, in %s' % (filename, lineno, name)) - if line: - print(' %s' % line.strip()) - print('Unfortunately you cannot use Ctrl+C when running the UI' - ' because the UI captures the signal.') - - -class Main(object): - """Main 
PyBitmessage class""" - def start(self): - """Start main application""" - # pylint: disable=too-many-statements,too-many-branches,too-many-locals - fixSocket() - adjustHalfOpenConnectionsLimit() - - config = BMConfigParser() - daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') - - try: - opts, _ = getopt.getopt( - sys.argv[1:], "hcdt", - ["help", "curses", "daemon", "test"]) - - except getopt.GetoptError: - self.usage() - sys.exit(2) - - for opt, _ in opts: - if opt in ("-h", "--help"): - self.usage() - sys.exit() - elif opt in ("-d", "--daemon"): - daemon = True - elif opt in ("-c", "--curses"): - state.curses = True - elif opt in ("-t", "--test"): - state.testmode = True - if os.path.isfile(os.path.join( - state.appdata, 'unittest.lock')): - daemon = True - state.enableGUI = False # run without a UI - # Fallback: in case when no api command was issued - state.last_api_response = time.time() - # Apply special settings - config.set( - 'bitmessagesettings', 'apienabled', 'true') - config.set( - 'bitmessagesettings', 'apiusername', 'username') - config.set( - 'bitmessagesettings', 'apipassword', 'password') - config.set( - 'bitmessagesettings', 'apivariant', 'legacy') - config.set( - 'bitmessagesettings', 'apinotifypath', - os.path.join(app_dir, 'tests', 'apinotify_handler.py') - ) - - if daemon: - state.enableGUI = False # run without a UI - - if state.enableGUI and not state.curses and not depends.check_pyqt(): - sys.exit( - 'PyBitmessage requires PyQt unless you want' - ' to run it as a daemon and interact with it' - ' using the API. You can download PyQt from ' - 'http://www.riverbankcomputing.com/software/pyqt/download' - ' or by searching Google for \'PyQt Download\'.' - ' If you want to run in daemon mode, see ' - 'https://bitmessage.org/wiki/Daemon\n' - 'You can also run PyBitmessage with' - ' the new curses interface by providing' - ' \'-c\' as a commandline argument.' - ) - # is the application already running? If yes then exit. 
- state.thisapp = singleinstance("", daemon) - - if daemon: - with printLock: - print('Running as a daemon. Send TERM signal to end.') - self.daemonize() - - self.setSignalHandler() - - set_thread_name("PyBitmessage") - - state.dandelion = config.safeGetInt('network', 'dandelion') - # dandelion requires outbound connections, without them, - # stem objects will get stuck forever - if state.dandelion and not config.safeGetBoolean( - 'bitmessagesettings', 'sendoutgoingconnections'): - state.dandelion = 0 - - if state.testmode or config.safeGetBoolean( - 'bitmessagesettings', 'extralowdifficulty'): - defaults.networkDefaultProofOfWorkNonceTrialsPerByte = int( - defaults.networkDefaultProofOfWorkNonceTrialsPerByte / 100) - defaults.networkDefaultPayloadLengthExtraBytes = int( - defaults.networkDefaultPayloadLengthExtraBytes / 100) - - readKnownNodes() - - # Not needed if objproc is disabled - if state.enableObjProc: - - # Start the address generation thread - addressGeneratorThread = addressGenerator() - # close the main program even if there are threads left - addressGeneratorThread.daemon = True - addressGeneratorThread.start() - - # Start the thread that calculates POWs - singleWorkerThread = singleWorker() - # close the main program even if there are threads left - singleWorkerThread.daemon = True - singleWorkerThread.start() - - # Start the SQL thread - sqlLookup = sqlThread() - # DON'T close the main program even if there are threads left. - # The closeEvent should command this thread to exit gracefully. 
- sqlLookup.daemon = False - sqlLookup.start() - - Inventory() # init - # init, needs to be early because other thread may access it early - Dandelion() - - # Enable object processor and SMTP only if objproc enabled - if state.enableObjProc: - - # SMTP delivery thread - if daemon and config.safeGet( - 'bitmessagesettings', 'smtpdeliver', '') != '': - from class_smtpDeliver import smtpDeliver - smtpDeliveryThread = smtpDeliver() - smtpDeliveryThread.start() - - # SMTP daemon thread - if daemon and config.safeGetBoolean( - 'bitmessagesettings', 'smtpd'): - from class_smtpServer import smtpServer - smtpServerThread = smtpServer() - smtpServerThread.start() - - # Start the thread that calculates POWs - objectProcessorThread = objectProcessor() - # DON'T close the main program even the thread remains. - # This thread checks the shutdown variable after processing - # each object. - objectProcessorThread.daemon = False - objectProcessorThread.start() - - # Start the cleanerThread - singleCleanerThread = singleCleaner() - # close the main program even if there are threads left - singleCleanerThread.daemon = True - singleCleanerThread.start() - - # Not needed if objproc disabled - if state.enableObjProc: - shared.reloadMyAddressHashes() - shared.reloadBroadcastSendersForWhichImWatching() - - # API is also objproc dependent - if config.safeGetBoolean('bitmessagesettings', 'apienabled'): - import api # pylint: disable=relative-import - singleAPIThread = api.singleAPI() - # close the main program even if there are threads left - singleAPIThread.daemon = True - singleAPIThread.start() - - # start network components if networking is enabled - if state.enableNetwork: - start_proxyconfig() - BMConnectionPool().connectToStream(1) - asyncoreThread = BMNetworkThread() - asyncoreThread.daemon = True - asyncoreThread.start() - for i in range(config.getint('threads', 'receive')): - receiveQueueThread = ReceiveQueueThread(i) - receiveQueueThread.daemon = True - receiveQueueThread.start() 
- if config.safeGetBoolean('bitmessagesettings', 'udp'): - state.announceThread = AnnounceThread() - state.announceThread.daemon = True - state.announceThread.start() - state.invThread = InvThread() - state.invThread.daemon = True - state.invThread.start() - state.addrThread = AddrThread() - state.addrThread.daemon = True - state.addrThread.start() - state.downloadThread = DownloadThread() - state.downloadThread.daemon = True - state.downloadThread.start() - state.uploadThread = UploadThread() - state.uploadThread.daemon = True - state.uploadThread.start() - - if config.safeGetBoolean('bitmessagesettings', 'upnp'): - import upnp - upnpThread = upnp.uPnPThread() - upnpThread.start() - else: - # Populate with hardcoded value (same as connectToStream above) - state.streamsInWhichIAmParticipating.append(1) - - if not daemon and state.enableGUI: - if state.curses: - if not depends.check_curses(): - sys.exit() - print('Running with curses') - import bitmessagecurses - bitmessagecurses.runwrapper() - else: - import bitmessageqt - bitmessageqt.run() - else: - config.remove_option('bitmessagesettings', 'dontconnect') - - if state.testmode: - populate_api_test_data() - - if daemon: - while state.shutdown == 0: - time.sleep(1) - if ( - state.testmode - and time.time() - state.last_api_response >= 30 - ): - self.stop() - elif not state.enableGUI: - state.enableGUI = True - try: - # pylint: disable=relative-import - from tests import core as test_core - except ImportError: - self.stop() - return - - test_core_result = test_core.run() - self.stop() - test_core.cleanup() - sys.exit(not test_core_result.wasSuccessful()) - - @staticmethod - def daemonize(): - """Running as a daemon. 
Send signal in end.""" - grandfatherPid = os.getpid() - parentPid = None - try: - if os.fork(): - # unlock - state.thisapp.cleanup() - # wait until grandchild ready - while True: - time.sleep(1) - os._exit(0) # pylint: disable=protected-access - except AttributeError: - # fork not implemented - pass - else: - parentPid = os.getpid() - state.thisapp.lock() # relock - - os.umask(0) - try: - os.setsid() - except AttributeError: - # setsid not implemented - pass - try: - if os.fork(): - # unlock - state.thisapp.cleanup() - # wait until child ready - while True: - time.sleep(1) - os._exit(0) # pylint: disable=protected-access - except AttributeError: - # fork not implemented - pass - else: - state.thisapp.lock() # relock - state.thisapp.lockPid = None # indicate we're the final child - sys.stdout.flush() - sys.stderr.flush() - if not sys.platform.startswith('win'): - si = open(os.devnull, 'r') - so = open(os.devnull, 'a+') - se = open(os.devnull, 'a+', 0) - os.dup2(si.fileno(), sys.stdin.fileno()) - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(se.fileno(), sys.stderr.fileno()) - if parentPid: - # signal ready - os.kill(parentPid, signal.SIGTERM) - os.kill(grandfatherPid, signal.SIGTERM) - - @staticmethod - def setSignalHandler(): - """Setting the Signal Handler""" - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - # signal.signal(signal.SIGINT, signal.SIG_DFL) - - @staticmethod - def usage(): - """Displaying the usages""" - print('Usage: ' + sys.argv[0] + ' [OPTIONS]') - print(''' -Options: - -h, --help show this help message and exit - -c, --curses use curses (text mode) interface - -d, --daemon run in daemon (background) mode - -t, --test dryrun, make testing - -All parameters are optional. -''') - - @staticmethod - def stop(): - """Stop main application""" - with printLock: - print('Stopping Bitmessage Deamon.') - shutdown.doCleanShutdown() - - # .. 
todo:: nice function but no one is using this - @staticmethod - def getApiAddress(): - """This function returns API address and port""" - if not BMConfigParser().safeGetBoolean( - 'bitmessagesettings', 'apienabled'): - return None - address = BMConfigParser().get('bitmessagesettings', 'apiinterface') - port = BMConfigParser().getint('bitmessagesettings', 'apiport') - return {'address': address, 'port': port} - - -def main(): - """Triggers main module""" - mainprogram = Main() - mainprogram.start() - - -if __name__ == "__main__": - main() - - -# So far, the creation of and management of the Bitmessage protocol and this -# client is a one-man operation. Bitcoin tips are quite appreciated. -# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u diff --git a/src/tests/mock/pybitmessage/build_osx.py b/src/tests/mock/pybitmessage/build_osx.py deleted file mode 100644 index 83d2f280..00000000 --- a/src/tests/mock/pybitmessage/build_osx.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Building osx.""" -import os -from glob import glob -from PyQt4 import QtCore -from setuptools import setup - -name = "Bitmessage" -version = os.getenv("PYBITMESSAGEVERSION", "custom") -mainscript = ["bitmessagemain.py"] - -DATA_FILES = [ - ('', ['sslkeys', 'images']), - ('bitmsghash', ['bitmsghash/bitmsghash.cl', 'bitmsghash/bitmsghash.so']), - ('translations', glob('translations/*.qm')), - ('ui', glob('bitmessageqt/*.ui')), - ( - 'translations', - glob(os.path.join(str(QtCore.QLibraryInfo.location( - QtCore.QLibraryInfo.TranslationsPath)), 'qt_??.qm'))), - ( - 'translations', - glob(os.path.join(str(QtCore.QLibraryInfo.location( - QtCore.QLibraryInfo.TranslationsPath)), 'qt_??_??.qm'))), -] - -setup( - name=name, - version=version, - app=mainscript, - data_files=DATA_FILES, - setup_requires=["py2app"], - options=dict( - py2app=dict( - includes=['sip', 'PyQt4._qt'], - iconfile="images/bitmessage.icns" - ) - ) -) diff --git a/src/tests/mock/pybitmessage/class_addressGenerator.py 
b/src/tests/mock/pybitmessage/class_addressGenerator.py index 0aaed140..b86e9278 100644 --- a/src/tests/mock/pybitmessage/class_addressGenerator.py +++ b/src/tests/mock/pybitmessage/class_addressGenerator.py @@ -13,7 +13,7 @@ from pybitmessage import queues from pybitmessage.bmconfigparser import BMConfigParser -# from network.threads import StoppableThread +from pybitmessage.threads import StoppableThread fake_addresses = { @@ -40,32 +40,13 @@ fake_addresses = { } -class StoppableThread(threading.Thread): - """Base class for application threads with stopThread method""" - name = None - logger = logging.getLogger('default') - - def __init__(self, name=None): - if name: - self.name = name - super(StoppableThread, self).__init__(name=self.name) - self.stop = threading.Event() - self._stopped = False - random.seed() - self.logger.info('Init thread %s', self.name) - - def stopThread(self): - """Stop the thread""" - self._stopped = True - self.stop.set() - - class addressGenerator(StoppableThread): """A thread for creating fake addresses""" name = "addressGenerator" address_list = list(fake_addresses.keys()) def stopThread(self): + try: queues.addressGeneratorQueue.put(("stopThread", "data")) except queue.Full: @@ -77,6 +58,7 @@ class addressGenerator(StoppableThread): Process the requests for addresses generation from `.queues.addressGeneratorQueue` """ + import pdb;pdb.set_trace() while state.shutdown == 0: queueValue = queues.addressGeneratorQueue.get() try: diff --git a/src/tests/mock/pybitmessage/class_objectProcessor.py b/src/tests/mock/pybitmessage/class_objectProcessor.py index 09120fc6..8f6f072d 100644 --- a/src/tests/mock/pybitmessage/class_objectProcessor.py +++ b/src/tests/mock/pybitmessage/class_objectProcessor.py @@ -9,8 +9,8 @@ import threading import queues import state -from helper_sql import sql_ready, sqlExecute, sqlQuery -from network import bmproto +# from helper_sql import sql_ready, sqlExecute, sqlQuery +# from network import bmproto logger = 
logging.getLogger('default') @@ -28,16 +28,17 @@ class objectProcessor(threading.Thread): # objectProcessorQueue. Assuming that Bitmessage wasn't closed # forcefully, it should have saved the data in the queue into the # objectprocessorqueue table. Let's pull it out. - sql_ready.wait() - queryreturn = sqlQuery( - 'SELECT objecttype, data FROM objectprocessorqueue') - for objectType, data in queryreturn: - queues.objectProcessorQueue.put((objectType, data)) - sqlExecute('DELETE FROM objectprocessorqueue') - logger.debug( - 'Loaded %s objects from disk into the objectProcessorQueue.', - len(queryreturn)) - self._ack_obj = bmproto.BMStringParser() + + # sql_ready.wait() + # queryreturn = sqlQuery( + # 'SELECT objecttype, data FROM objectprocessorqueue') + # for objectType, data in queryreturn: + # queues.objectProcessorQueue.put((objectType, data)) + # sqlExecute('DELETE FROM objectprocessorqueue') + # logger.debug( + # 'Loaded %s objects from disk into the objectProcessorQueue.', + # len(queryreturn)) + # self._ack_obj = bmproto.BMStringParser() self.successfullyDecryptMessageTimings = [] def run(self): diff --git a/src/tests/mock/pybitmessage/class_singleCleaner.py b/src/tests/mock/pybitmessage/class_singleCleaner.py deleted file mode 100644 index 3f3f8ec0..00000000 --- a/src/tests/mock/pybitmessage/class_singleCleaner.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -The `singleCleaner` class is a timer-driven thread that cleans data structures -to free memory, resends messages when a remote node doesn't respond, and -sends pong messages to keep connections alive if the network isn't busy. 
- -It cleans these data structures in memory: - - inventory (moves data to the on-disk sql database) - - inventorySets (clears then reloads data out of sql database) - -It cleans these tables on the disk: - - inventory (clears expired objects) - - pubkeys (clears pubkeys older than 4 weeks old which we have not used - personally) - - knownNodes (clears addresses which have not been online for over 3 days) - -It resends messages when there has been no response: - - resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...) - - resends msg messages in 5 days (then 10 days, then 20 days, etc...) - -""" - -import gc -import os -import time - -import queues -import state -from bmconfigparser import BMConfigParser -from helper_sql import sqlExecute, sqlQuery -from inventory import Inventory -from network import BMConnectionPool, knownnodes, StoppableThread -from tr import _translate - - -#: Equals 4 weeks. You could make this longer if you want -#: but making it shorter would not be advisable because -#: there is a very small possibility that it could keep you -#: from obtaining a needed pubkey for a period of time. -lengthOfTimeToHoldOnToAllPubkeys = 2419200 - - -class singleCleaner(StoppableThread): - """The singleCleaner thread class""" - name = "singleCleaner" - cycleLength = 300 - expireDiscoveredPeers = 300 - - def run(self): # pylint: disable=too-many-branches - gc.disable() - timeWeLastClearedInventoryAndPubkeysTables = 0 - try: - state.maximumLengthOfTimeToBotherResendingMessages = ( - BMConfigParser().getfloat( - 'bitmessagesettings', 'stopresendingafterxdays') - * 24 * 60 * 60 - ) + ( - BMConfigParser().getfloat( - 'bitmessagesettings', 'stopresendingafterxmonths') - * (60 * 60 * 24 * 365) / 12) - except: # noqa:E722 - # Either the user hasn't set stopresendingafterxdays and - # stopresendingafterxmonths yet or the options are missing - # from the config file. 
- state.maximumLengthOfTimeToBotherResendingMessages = float('inf') - - while state.shutdown == 0: - self.stop.wait(self.cycleLength) - queues.UISignalQueue.put(( - 'updateStatusBar', - 'Doing housekeeping (Flushing inventory in memory to disk...)' - )) - Inventory().flush() - queues.UISignalQueue.put(('updateStatusBar', '')) - - # If we are running as a daemon then we are going to fill up the UI - # queue which will never be handled by a UI. We should clear it to - # save memory. - # FIXME redundant? - if state.thisapp.daemon or not state.enableGUI: - queues.UISignalQueue.queue.clear() - - tick = int(time.time()) - if timeWeLastClearedInventoryAndPubkeysTables < tick - 7380: - timeWeLastClearedInventoryAndPubkeysTables = tick - Inventory().clean() - queues.workerQueue.put(('sendOnionPeerObj', '')) - # pubkeys - sqlExecute( - "DELETE FROM pubkeys WHERE time?)", - tick, - tick - state.maximumLengthOfTimeToBotherResendingMessages - ) - for toAddress, ackData, status in queryreturn: - if status == 'awaitingpubkey': - self.resendPubkeyRequest(toAddress) - elif status == 'msgsent': - self.resendMsg(ackData) - - try: - # Cleanup knownnodes and handle possible severe exception - # while writing it to disk - knownnodes.cleanupKnownNodes() - except Exception as err: - if "Errno 28" in str(err): - self.logger.fatal( - '(while writing knownnodes to disk)' - ' Alert: Your disk or data storage volume is full.' - ) - queues.UISignalQueue.put(( - 'alert', - (_translate("MainWindow", "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume' - ' is full. Bitmessage will now exit.'), - True) - )) - # FIXME redundant? 
- if state.thisapp.daemon or not state.enableGUI: - os._exit(1) # pylint: disable=protected-access - - # inv/object tracking - for connection in BMConnectionPool().connections(): - connection.clean() - - # discovery tracking - exp = time.time() - singleCleaner.expireDiscoveredPeers - reaper = (k for k, v in state.discoveredPeers.items() if v < exp) - for k in reaper: - try: - del state.discoveredPeers[k] - except KeyError: - pass - # ..todo:: cleanup pending upload / download - - gc.collect() - - def resendPubkeyRequest(self, address): - """Resend pubkey request for address""" - self.logger.debug( - 'It has been a long time and we haven\'t heard a response to our' - ' getpubkey request. Sending again.' - ) - try: - # We need to take this entry out of the neededPubkeys structure - # because the queues.workerQueue checks to see whether the entry - # is already present and will not do the POW and send the message - # because it assumes that it has already done it recently. - del state.neededPubkeys[address] - except KeyError: - pass - except RuntimeError: - self.logger.warning( - "Can't remove %s from neededPubkeys, requesting pubkey will be delayed", address, exc_info=True) - - queues.UISignalQueue.put(( - 'updateStatusBar', - 'Doing work necessary to again attempt to request a public key...' - )) - sqlExecute( - "UPDATE sent SET status = 'msgqueued'" - " WHERE toaddress = ? AND folder = 'sent'", address) - queues.workerQueue.put(('sendmessage', '')) - - def resendMsg(self, ackdata): - """Resend message by ackdata""" - self.logger.debug( - 'It has been a long time and we haven\'t heard an acknowledgement' - ' to our msg. Sending again.' - ) - sqlExecute( - "UPDATE sent SET status = 'msgqueued'" - " WHERE ackdata = ? AND folder = 'sent'", ackdata) - queues.workerQueue.put(('sendmessage', '')) - queues.UISignalQueue.put(( - 'updateStatusBar', - 'Doing work necessary to again attempt to deliver a message...' 
- )) diff --git a/src/tests/mock/pybitmessage/class_singleWorker.py b/src/tests/mock/pybitmessage/class_singleWorker.py index f60b1f05..924db8eb 100644 --- a/src/tests/mock/pybitmessage/class_singleWorker.py +++ b/src/tests/mock/pybitmessage/class_singleWorker.py @@ -4,11 +4,10 @@ Thread for performing PoW from __future__ import division -import proofofwork -import queues -import state +from pybitmessage import state +from pybitmessage import queues -from network import StoppableThread +from pybitmessage.threads import StoppableThread from six.moves import queue @@ -16,8 +15,7 @@ class singleWorker(StoppableThread): """Thread for performing PoW""" def __init__(self): - super(MockSingleWorker, self).__init__(name="singleWorker") - proofofwork.init() + super(singleWorker, self).__init__(name="singleWorker") self.busy = None def stopThread(self): @@ -27,7 +25,7 @@ class singleWorker(StoppableThread): queues.workerQueue.put(("stopThread", "data")) except queue.Full: self.logger.error('workerQueue is Full') - super(MockSingleWorker, self).stopThread() + super(singleWorker, self).stopThread() def run(self): diff --git a/src/tests/mock/pybitmessage/class_smtpDeliver.py b/src/tests/mock/pybitmessage/class_smtpDeliver.py deleted file mode 100644 index 08cb35ab..00000000 --- a/src/tests/mock/pybitmessage/class_smtpDeliver.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -SMTP client thread for delivering emails -""" -# pylint: disable=unused-variable - -import smtplib -import urlparse -from email.header import Header -from email.mime.text import MIMEText - -import queues -import state -from bmconfigparser import BMConfigParser -from network.threads import StoppableThread - -SMTPDOMAIN = "bmaddr.lan" - - -class smtpDeliver(StoppableThread): - """SMTP client thread for delivery""" - name = "smtpDeliver" - _instance = None - - def stopThread(self): - # pylint: disable=no-member - try: - queues.UISignallerQueue.put(("stopThread", "data")) - except: # noqa:E722 - pass - super(smtpDeliver, 
self).stopThread() - - @classmethod - def get(cls): - """(probably) Singleton functionality""" - if not cls._instance: - cls._instance = smtpDeliver() - return cls._instance - - def run(self): - # pylint: disable=too-many-branches,too-many-statements,too-many-locals - # pylint: disable=deprecated-lambda - while state.shutdown == 0: - command, data = queues.UISignalQueue.get() - if command == 'writeNewAddressToTable': - label, address, streamNumber = data - elif command == 'updateStatusBar': - pass - elif command == 'updateSentItemStatusByToAddress': - toAddress, message = data - elif command == 'updateSentItemStatusByAckdata': - ackData, message = data - elif command == 'displayNewInboxMessage': - inventoryHash, toAddress, fromAddress, subject, body = data - dest = BMConfigParser().safeGet("bitmessagesettings", "smtpdeliver", '') - if dest == '': - continue - try: - u = urlparse.urlparse(dest) - to = urlparse.parse_qs(u.query)['to'] - client = smtplib.SMTP(u.hostname, u.port) - msg = MIMEText(body, 'plain', 'utf-8') - msg['Subject'] = Header(subject, 'utf-8') - msg['From'] = fromAddress + '@' + SMTPDOMAIN - toLabel = map( - lambda y: BMConfigParser().safeGet(y, "label"), - filter( - lambda x: x == toAddress, BMConfigParser().addresses()) - ) - if toLabel: - msg['To'] = "\"%s\" <%s>" % (Header(toLabel[0], 'utf-8'), toAddress + '@' + SMTPDOMAIN) - else: - msg['To'] = toAddress + '@' + SMTPDOMAIN - client.ehlo() - client.starttls() - client.ehlo() - client.sendmail(msg['From'], [to], msg.as_string()) - self.logger.info( - 'Delivered via SMTP to %s through %s:%i ...', - to, u.hostname, u.port) - client.quit() - except: # noqa:E722 - self.logger.error('smtp delivery error', exc_info=True) - elif command == 'displayNewSentMessage': - toAddress, fromLabel, fromAddress, subject, message, ackdata = data - elif command == 'updateNetworkStatusTab': - pass - elif command == 'updateNumberOfMessagesProcessed': - pass - elif command == 'updateNumberOfPubkeysProcessed': - pass - 
elif command == 'updateNumberOfBroadcastsProcessed': - pass - elif command == 'setStatusIcon': - pass - elif command == 'changedInboxUnread': - pass - elif command == 'rerenderMessagelistFromLabels': - pass - elif command == 'rerenderMessagelistToLabels': - pass - elif command == 'rerenderAddressBook': - pass - elif command == 'rerenderSubscriptions': - pass - elif command == 'rerenderBlackWhiteList': - pass - elif command == 'removeInboxRowByMsgid': - pass - elif command == 'newVersionAvailable': - pass - elif command == 'alert': - title, text, exitAfterUserClicksOk = data - elif command == 'stopThread': - break - else: - self.logger.warning( - 'Command sent to smtpDeliver not recognized: %s', command) diff --git a/src/tests/mock/pybitmessage/class_smtpServer.py b/src/tests/mock/pybitmessage/class_smtpServer.py deleted file mode 100644 index f5b63c2e..00000000 --- a/src/tests/mock/pybitmessage/class_smtpServer.py +++ /dev/null @@ -1,217 +0,0 @@ -""" -SMTP server thread -""" -import asyncore -import base64 -import email -import logging -import re -import signal -import smtpd -import threading -import time -from email.header import decode_header -from email.parser import Parser - -import queues -from addresses import decodeAddress -from bmconfigparser import BMConfigParser -from helper_ackPayload import genAckPayload -from helper_sql import sqlExecute -from network.threads import StoppableThread -from version import softwareVersion - -SMTPDOMAIN = "bmaddr.lan" -LISTENPORT = 8425 - -logger = logging.getLogger('default') -# pylint: disable=attribute-defined-outside-init - - -class SmtpServerChannelException(Exception): - """Generic smtp server channel exception.""" - pass - - -class smtpServerChannel(smtpd.SMTPChannel): - """Asyncore channel for SMTP protocol (server)""" - def smtp_EHLO(self, arg): - """Process an EHLO""" - if not arg: - self.push('501 Syntax: HELO hostname') - return - self.push('250-PyBitmessage %s' % softwareVersion) - self.push('250 AUTH PLAIN') - 
- def smtp_AUTH(self, arg): - """Process AUTH""" - if not arg or arg[0:5] not in ["PLAIN"]: - self.push('501 Syntax: AUTH PLAIN') - return - authstring = arg[6:] - try: - decoded = base64.b64decode(authstring) - correctauth = "\x00" + BMConfigParser().safeGet( - "bitmessagesettings", "smtpdusername", "") + "\x00" + BMConfigParser().safeGet( - "bitmessagesettings", "smtpdpassword", "") - logger.debug('authstring: %s / %s', correctauth, decoded) - if correctauth == decoded: - self.auth = True - self.push('235 2.7.0 Authentication successful') - else: - raise SmtpServerChannelException("Auth fail") - except: # noqa:E722 - self.push('501 Authentication fail') - - def smtp_DATA(self, arg): - """Process DATA""" - if not hasattr(self, "auth") or not self.auth: - self.push('530 Authentication required') - return - smtpd.SMTPChannel.smtp_DATA(self, arg) - - -class smtpServerPyBitmessage(smtpd.SMTPServer): - """Asyncore SMTP server class""" - def handle_accept(self): - """Accept a connection""" - pair = self.accept() - if pair is not None: - conn, addr = pair - self.channel = smtpServerChannel(self, conn, addr) - - def send(self, fromAddress, toAddress, subject, message): - """Send a bitmessage""" - # pylint: disable=arguments-differ - streamNumber, ripe = decodeAddress(toAddress)[2:] - stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel') - ackdata = genAckPayload(streamNumber, stealthLevel) - sqlExecute( - '''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', - '', - toAddress, - ripe, - fromAddress, - subject, - message, - ackdata, - int(time.time()), # sentTime (this will never change) - int(time.time()), # lastActionTime - 0, # sleepTill time. This will get set when the POW gets done. 
- 'msgqueued', - 0, # retryNumber - 'sent', # folder - 2, # encodingtype - # not necessary to have a TTL higher than 2 days - min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2) - ) - - queues.workerQueue.put(('sendmessage', toAddress)) - - def decode_header(self, hdr): - """Email header decoding""" - ret = [] - for h in decode_header(self.msg_headers[hdr]): - if h[1]: - ret.append(h[0].decode(h[1])) - else: - ret.append(h[0].decode("utf-8", errors='replace')) - - return ret - - def process_message(self, peer, mailfrom, rcpttos, data): - """Process an email""" - # pylint: disable=too-many-locals, too-many-branches - p = re.compile(".*<([^>]+)>") - if not hasattr(self.channel, "auth") or not self.channel.auth: - logger.error('Missing or invalid auth') - return - try: - self.msg_headers = Parser().parsestr(data) - except: # noqa:E722 - logger.error('Invalid headers') - return - - try: - sender, domain = p.sub(r'\1', mailfrom).split("@") - if domain != SMTPDOMAIN: - raise Exception("Bad domain %s" % domain) - if sender not in BMConfigParser().addresses(): - raise Exception("Nonexisting user %s" % sender) - except Exception as err: - logger.debug('Bad envelope from %s: %r', mailfrom, err) - msg_from = self.decode_header("from") - try: - msg_from = p.sub(r'\1', self.decode_header("from")[0]) - sender, domain = msg_from.split("@") - if domain != SMTPDOMAIN: - raise Exception("Bad domain %s" % domain) - if sender not in BMConfigParser().addresses(): - raise Exception("Nonexisting user %s" % sender) - except Exception as err: - logger.error('Bad headers from %s: %r', msg_from, err) - return - - try: - msg_subject = self.decode_header('subject')[0] - except: # noqa:E722 - msg_subject = "Subject missing..." 
- - msg_tmp = email.message_from_string(data) - body = u'' - for part in msg_tmp.walk(): - if part and part.get_content_type() == "text/plain": - body += part.get_payload(decode=1).decode(part.get_content_charset('utf-8'), errors='replace') - - for to in rcpttos: - try: - rcpt, domain = p.sub(r'\1', to).split("@") - if domain != SMTPDOMAIN: - raise Exception("Bad domain %s" % domain) - logger.debug( - 'Sending %s to %s about %s', sender, rcpt, msg_subject) - self.send(sender, rcpt, msg_subject, body) - logger.info('Relayed %s to %s', sender, rcpt) - except Exception as err: - logger.error('Bad to %s: %r', to, err) - continue - return - - -class smtpServer(StoppableThread): - """SMTP server thread""" - def __init__(self, _=None): - super(smtpServer, self).__init__(name="smtpServerThread") - self.server = smtpServerPyBitmessage(('127.0.0.1', LISTENPORT), None) - - def stopThread(self): - super(smtpServer, self).stopThread() - self.server.close() - return - - def run(self): - asyncore.loop(1) - - -def signals(_, __): - """Signal handler""" - logger.warning('Got signal, terminating') - for thread in threading.enumerate(): - if thread.isAlive() and isinstance(thread, StoppableThread): - thread.stopThread() - - -def runServer(): - """Run SMTP server as a standalone python process""" - logger.warning('Running SMTPd thread') - smtpThread = smtpServer() - smtpThread.start() - signal.signal(signal.SIGINT, signals) - signal.signal(signal.SIGTERM, signals) - logger.warning('Processing') - smtpThread.join() - logger.warning('The end') - - -if __name__ == "__main__": - runServer() diff --git a/src/tests/mock/pybitmessage/class_sqlThread.py b/src/tests/mock/pybitmessage/class_sqlThread.py deleted file mode 100644 index d22ffadb..00000000 --- a/src/tests/mock/pybitmessage/class_sqlThread.py +++ /dev/null @@ -1,639 +0,0 @@ -""" -sqlThread is defined here -""" - -import os -import shutil # used for moving the messages.dat file -import sqlite3 -import sys -import threading -import 
time - -try: - import helper_sql - import helper_startup - import paths - import queues - import state - from addresses import encodeAddress - from bmconfigparser import BMConfigParser - from debug import logger - from tr import _translate -except ImportError: - from . import helper_sql, helper_startup, paths, queues, state - from .addresses import encodeAddress - from .bmconfigparser import BMConfigParser - from .debug import logger - from .tr import _translate - - -class sqlThread(threading.Thread): - """A thread for all SQL operations""" - - def __init__(self): - threading.Thread.__init__(self, name="SQL") - - def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements - """Process SQL queries from `.helper_sql.sqlSubmitQueue`""" - helper_sql.sql_available = True - self.conn = sqlite3.connect(state.appdata + 'messages.dat') - self.conn.text_factory = str - self.cur = self.conn.cursor() - - self.cur.execute('PRAGMA secure_delete = true') - - # call create_function for encode address - self.create_function() - - try: - self.cur.execute( - '''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text,''' - ''' received text, message text, folder text, encodingtype int, read bool, sighash blob,''' - ''' UNIQUE(msgid) ON CONFLICT REPLACE)''') - self.cur.execute( - '''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text,''' - ''' message text, ackdata blob, senttime integer, lastactiontime integer,''' - ''' sleeptill integer, status text, retrynumber integer, folder text, encodingtype int, ttl int)''') - self.cur.execute( - '''CREATE TABLE subscriptions (label text, address text, enabled bool)''') - self.cur.execute( - '''CREATE TABLE addressbook (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''') - self.cur.execute( - '''CREATE TABLE blacklist (label text, address text, enabled bool)''') - self.cur.execute( - '''CREATE TABLE whitelist (label text, address text, 
enabled bool)''') - self.cur.execute( - '''CREATE TABLE pubkeys (address text, addressversion int, transmitdata blob, time int,''' - ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') - self.cur.execute( - '''CREATE TABLE inventory (hash blob, objecttype int, streamnumber int, payload blob,''' - ''' expirestime integer, tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') - self.cur.execute( - '''INSERT INTO subscriptions VALUES''' - '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') - self.cur.execute( - '''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') - self.cur.execute('''INSERT INTO settings VALUES('version','11')''') - self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( - int(time.time()),)) - self.cur.execute( - '''CREATE TABLE objectprocessorqueue''' - ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') - self.conn.commit() - logger.info('Created messages database file') - except Exception as err: - if str(err) == 'table inbox already exists': - logger.debug('Database file already exists.') - - else: - sys.stderr.write( - 'ERROR trying to create database file (message.dat). Error message: %s\n' % str(err)) - os._exit(0) - - # If the settings version is equal to 2 or 3 then the - # sqlThread will modify the pubkeys table and change - # the settings version to 4. - settingsversion = BMConfigParser().getint( - 'bitmessagesettings', 'settingsversion') - - # People running earlier versions of PyBitmessage do not have the - # usedpersonally field in their pubkeys table. Let's add it. 
- if settingsversion == 2: - item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' ''' - parameters = '' - self.cur.execute(item, parameters) - self.conn.commit() - - settingsversion = 3 - - # People running earlier versions of PyBitmessage do not have the - # encodingtype field in their inbox and sent tables or the read field - # in the inbox table. Let's add them. - if settingsversion == 3: - item = '''ALTER TABLE inbox ADD encodingtype int DEFAULT '2' ''' - parameters = '' - self.cur.execute(item, parameters) - - item = '''ALTER TABLE inbox ADD read bool DEFAULT '1' ''' - parameters = '' - self.cur.execute(item, parameters) - - item = '''ALTER TABLE sent ADD encodingtype int DEFAULT '2' ''' - parameters = '' - self.cur.execute(item, parameters) - self.conn.commit() - - settingsversion = 4 - - BMConfigParser().set( - 'bitmessagesettings', 'settingsversion', str(settingsversion)) - BMConfigParser().save() - - helper_startup.updateConfig() - - # From now on, let us keep a 'version' embedded in the messages.dat - # file so that when we make changes to the database, the database - # version we are on can stay embedded in the messages.dat file. Let us - # check to see if the settings table exists yet. - item = '''SELECT name FROM sqlite_master WHERE type='table' AND name='settings';''' - parameters = '' - self.cur.execute(item, parameters) - if self.cur.fetchall() == []: - # The settings table doesn't exist. We need to make it. 
- logger.debug( - "In messages.dat database, creating new 'settings' table.") - self.cur.execute( - '''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''') - self.cur.execute('''INSERT INTO settings VALUES('version','1')''') - self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', ( - int(time.time()),)) - logger.debug('In messages.dat database, removing an obsolete field from the pubkeys table.') - self.cur.execute( - '''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int,''' - ''' usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''') - self.cur.execute( - '''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''') - self.cur.execute('''DROP TABLE pubkeys''') - self.cur.execute( - '''CREATE TABLE pubkeys''' - ''' (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''') - self.cur.execute( - '''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''') - self.cur.execute('''DROP TABLE pubkeys_backup;''') - logger.debug( - 'Deleting all pubkeys from inventory.' - ' They will be redownloaded and then saved with the correct times.') - self.cur.execute( - '''delete from inventory where objecttype = 'pubkey';''') - logger.debug('replacing Bitmessage announcements mailing list with a new one.') - self.cur.execute( - '''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''') - self.cur.execute( - '''INSERT INTO subscriptions VALUES''' - '''('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''') - logger.debug('Commiting.') - self.conn.commit() - logger.debug('Vacuuming message.dat. You might notice that the file size gets much smaller.') - self.cur.execute(''' VACUUM ''') - - # After code refactoring, the possible status values for sent messages - # have changed. 
- self.cur.execute( - '''update sent set status='doingmsgpow' where status='doingpow' ''') - self.cur.execute( - '''update sent set status='msgsent' where status='sentmessage' ''') - self.cur.execute( - '''update sent set status='doingpubkeypow' where status='findingpubkey' ''') - self.cur.execute( - '''update sent set status='broadcastqueued' where status='broadcastpending' ''') - self.conn.commit() - - # Let's get rid of the first20bytesofencryptedmessage field in - # the inventory table. - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - if int(self.cur.fetchall()[0][0]) == 2: - logger.debug( - 'In messages.dat database, removing an obsolete field from' - ' the inventory table.') - self.cur.execute( - '''CREATE TEMPORARY TABLE inventory_backup''' - '''(hash blob, objecttype text, streamnumber int, payload blob,''' - ''' receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE);''') - self.cur.execute( - '''INSERT INTO inventory_backup SELECT hash, objecttype, streamnumber, payload, receivedtime''' - ''' FROM inventory;''') - self.cur.execute('''DROP TABLE inventory''') - self.cur.execute( - '''CREATE TABLE inventory''' - ''' (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer,''' - ''' UNIQUE(hash) ON CONFLICT REPLACE)''') - self.cur.execute( - '''INSERT INTO inventory SELECT hash, objecttype, streamnumber, payload, receivedtime''' - ''' FROM inventory_backup;''') - self.cur.execute('''DROP TABLE inventory_backup;''') - item = '''update settings set value=? WHERE key='version';''' - parameters = (3,) - self.cur.execute(item, parameters) - - # Add a new column to the inventory table to store tags. 
- item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 1 or currentVersion == 3: - logger.debug( - 'In messages.dat database, adding tag field to' - ' the inventory table.') - item = '''ALTER TABLE inventory ADD tag blob DEFAULT '' ''' - parameters = '' - self.cur.execute(item, parameters) - item = '''update settings set value=? WHERE key='version';''' - parameters = (4,) - self.cur.execute(item, parameters) - - # Add a new column to the pubkeys table to store the address version. - # We're going to trash all of our pubkeys and let them be redownloaded. - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 4: - self.cur.execute('''DROP TABLE pubkeys''') - self.cur.execute( - '''CREATE TABLE pubkeys (hash blob, addressversion int, transmitdata blob, time int,''' - '''usedpersonally text, UNIQUE(hash, addressversion) ON CONFLICT REPLACE)''') - self.cur.execute( - '''delete from inventory where objecttype = 'pubkey';''') - item = '''update settings set value=? WHERE key='version';''' - parameters = (5,) - self.cur.execute(item, parameters) - - # Add a new table: objectprocessorqueue with which to hold objects - # that have yet to be processed if the user shuts down Bitmessage. - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 5: - self.cur.execute('''DROP TABLE knownnodes''') - self.cur.execute( - '''CREATE TABLE objectprocessorqueue''' - ''' (objecttype text, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') - item = '''update settings set value=? 
WHERE key='version';''' - parameters = (6,) - self.cur.execute(item, parameters) - - # changes related to protocol v3 - # In table inventory and objectprocessorqueue, objecttype is now - # an integer (it was a human-friendly string previously) - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 6: - logger.debug( - 'In messages.dat database, dropping and recreating' - ' the inventory table.') - self.cur.execute('''DROP TABLE inventory''') - self.cur.execute( - '''CREATE TABLE inventory''' - ''' (hash blob, objecttype int, streamnumber int, payload blob, expirestime integer,''' - ''' tag blob, UNIQUE(hash) ON CONFLICT REPLACE)''') - self.cur.execute('''DROP TABLE objectprocessorqueue''') - self.cur.execute( - '''CREATE TABLE objectprocessorqueue''' - ''' (objecttype int, data blob, UNIQUE(objecttype, data) ON CONFLICT REPLACE)''') - item = '''update settings set value=? WHERE key='version';''' - parameters = (7,) - self.cur.execute(item, parameters) - logger.debug( - 'Finished dropping and recreating the inventory table.') - - # The format of data stored in the pubkeys table has changed. Let's - # clear it, and the pubkeys from inventory, so that they'll - # be re-downloaded. - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 7: - logger.debug( - 'In messages.dat database, clearing pubkeys table' - ' because the data format has been updated.') - self.cur.execute( - '''delete from inventory where objecttype = 1;''') - self.cur.execute( - '''delete from pubkeys;''') - # Any sending messages for which we *thought* that we had - # the pubkey must be rechecked. 
- self.cur.execute( - '''UPDATE sent SET status='msgqueued' WHERE status='doingmsgpow' or status='badkey';''') - query = '''update settings set value=? WHERE key='version';''' - parameters = (8,) - self.cur.execute(query, parameters) - logger.debug('Finished clearing currently held pubkeys.') - - # Add a new column to the inbox table to store the hash of - # the message signature. We'll use this as temporary message UUID - # in order to detect duplicates. - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 8: - logger.debug( - 'In messages.dat database, adding sighash field to' - ' the inbox table.') - item = '''ALTER TABLE inbox ADD sighash blob DEFAULT '' ''' - parameters = '' - self.cur.execute(item, parameters) - item = '''update settings set value=? WHERE key='version';''' - parameters = (9,) - self.cur.execute(item, parameters) - - # We'll also need a `sleeptill` field and a `ttl` field. Also we - # can combine the pubkeyretrynumber and msgretrynumber into one. 
- - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 9: - logger.info( - 'In messages.dat database, making TTL-related changes:' - ' combining the pubkeyretrynumber and msgretrynumber' - ' fields into the retrynumber field and adding the' - ' sleeptill and ttl fields...') - self.cur.execute( - '''CREATE TEMPORARY TABLE sent_backup''' - ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' - ''' ackdata blob, lastactiontime integer, status text, retrynumber integer,''' - ''' folder text, encodingtype int)''') - self.cur.execute( - '''INSERT INTO sent_backup SELECT msgid, toaddress, toripe, fromaddress,''' - ''' subject, message, ackdata, lastactiontime,''' - ''' status, 0, folder, encodingtype FROM sent;''') - self.cur.execute('''DROP TABLE sent''') - self.cur.execute( - '''CREATE TABLE sent''' - ''' (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text,''' - ''' ackdata blob, senttime integer, lastactiontime integer, sleeptill int, status text,''' - ''' retrynumber integer, folder text, encodingtype int, ttl int)''') - self.cur.execute( - '''INSERT INTO sent SELECT msgid, toaddress, toripe, fromaddress, subject, message, ackdata,''' - ''' lastactiontime, lastactiontime, 0, status, 0, folder, encodingtype, 216000 FROM sent_backup;''') - self.cur.execute('''DROP TABLE sent_backup''') - logger.info('In messages.dat database, finished making TTL-related changes.') - logger.debug('In messages.dat database, adding address field to the pubkeys table.') - # We're going to have to calculate the address for each row in the pubkeys - # table. Then we can take out the hash field. 
- self.cur.execute('''ALTER TABLE pubkeys ADD address text DEFAULT '' ;''') - - # replica for loop to update hashed address - self.cur.execute('''UPDATE pubkeys SET address=(enaddr(pubkeys.addressversion, 1, hash)); ''') - - # Now we can remove the hash field from the pubkeys table. - self.cur.execute( - '''CREATE TEMPORARY TABLE pubkeys_backup''' - ''' (address text, addressversion int, transmitdata blob, time int,''' - ''' usedpersonally text, UNIQUE(address) ON CONFLICT REPLACE)''') - self.cur.execute( - '''INSERT INTO pubkeys_backup''' - ''' SELECT address, addressversion, transmitdata, time, usedpersonally FROM pubkeys;''') - self.cur.execute('''DROP TABLE pubkeys''') - self.cur.execute( - '''CREATE TABLE pubkeys''' - ''' (address text, addressversion int, transmitdata blob, time int, usedpersonally text,''' - ''' UNIQUE(address) ON CONFLICT REPLACE)''') - self.cur.execute( - '''INSERT INTO pubkeys SELECT''' - ''' address, addressversion, transmitdata, time, usedpersonally FROM pubkeys_backup;''') - self.cur.execute('''DROP TABLE pubkeys_backup''') - logger.debug( - 'In messages.dat database, done adding address field to the pubkeys table' - ' and removing the hash field.') - self.cur.execute('''update settings set value=10 WHERE key='version';''') - - # Update the address colunm to unique in addressbook table - item = '''SELECT value FROM settings WHERE key='version';''' - parameters = '' - self.cur.execute(item, parameters) - currentVersion = int(self.cur.fetchall()[0][0]) - if currentVersion == 10: - logger.debug( - 'In messages.dat database, updating address column to UNIQUE' - ' in the addressbook table.') - self.cur.execute( - '''ALTER TABLE addressbook RENAME TO old_addressbook''') - self.cur.execute( - '''CREATE TABLE addressbook''' - ''' (label text, address text, UNIQUE(address) ON CONFLICT IGNORE)''') - self.cur.execute( - '''INSERT INTO addressbook SELECT label, address FROM old_addressbook;''') - self.cur.execute('''DROP TABLE old_addressbook''') 
- self.cur.execute('''update settings set value=11 WHERE key='version';''') - - # Are you hoping to add a new option to the keys.dat file of existing - # Bitmessage users or modify the SQLite database? Add it right - # above this line! - - try: - testpayload = '\x00\x00' - t = ('1234', 1, testpayload, '12345678', 'no') - self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?,?)''', t) - self.conn.commit() - self.cur.execute( - '''SELECT transmitdata FROM pubkeys WHERE address='1234' ''') - queryreturn = self.cur.fetchall() - for row in queryreturn: - transmitdata, = row - self.cur.execute('''DELETE FROM pubkeys WHERE address='1234' ''') - self.conn.commit() - if transmitdata == '': - logger.fatal( - 'Problem: The version of SQLite you have cannot store Null values.' - ' Please download and install the latest revision of your version of Python' - ' (for example, the latest Python 2.7 revision) and try again.\n') - logger.fatal( - 'PyBitmessage will now exit very abruptly.' - ' You may now see threading errors related to this abrupt exit' - ' but the problem you need to solve is related to SQLite.\n\n') - os._exit(0) - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(While null value test) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - else: - logger.error(err) - - # Let us check to see the last time we vaccumed the messages.dat file. - # If it has been more than a month let's do it now. 
- item = '''SELECT value FROM settings WHERE key='lastvacuumtime';''' - parameters = '' - self.cur.execute(item, parameters) - queryreturn = self.cur.fetchall() - for row in queryreturn: - value, = row - if int(value) < int(time.time()) - 86400: - logger.info('It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...') - try: - self.cur.execute(''' VACUUM ''') - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(While VACUUM) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - item = '''update settings set value=? WHERE key='lastvacuumtime';''' - parameters = (int(time.time()),) - self.cur.execute(item, parameters) - - helper_sql.sql_ready.set() - - while True: - item = helper_sql.sqlSubmitQueue.get() - if item == 'commit': - try: - self.conn.commit() - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(While committing) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - elif item == 'exit': - self.conn.close() - logger.info('sqlThread exiting gracefully.') - - return - elif item == 'movemessagstoprog': - logger.debug('the sqlThread is moving the messages.dat file to the local program directory.') - - try: - self.conn.commit() - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(while movemessagstoprog) Alert: Your disk or data storage volume is full.' 
- ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - self.conn.close() - shutil.move( - paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat') - self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat') - self.conn.text_factory = str - self.cur = self.conn.cursor() - elif item == 'movemessagstoappdata': - logger.debug('the sqlThread is moving the messages.dat file to the Appdata folder.') - - try: - self.conn.commit() - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(while movemessagstoappdata) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - self.conn.close() - shutil.move( - paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat') - self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat') - self.conn.text_factory = str - self.cur = self.conn.cursor() - elif item == 'deleteandvacuume': - self.cur.execute('''delete from inbox where folder='trash' ''') - self.cur.execute('''delete from sent where folder='trash' ''') - self.conn.commit() - try: - self.cur.execute(''' VACUUM ''') - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(while deleteandvacuume) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. 
Bitmessage will now exit.'), - True))) - os._exit(0) - else: - parameters = helper_sql.sqlSubmitQueue.get() - rowcount = 0 - try: - self.cur.execute(item, parameters) - rowcount = self.cur.rowcount - except Exception as err: - if str(err) == 'database or disk is full': - logger.fatal( - '(while cur.execute) Alert: Your disk or data storage volume is full.' - ' sqlThread will now exit.') - queues.UISignalQueue.put(( - 'alert', ( - _translate( - "MainWindow", - "Disk full"), - _translate( - "MainWindow", - 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), - True))) - os._exit(0) - else: - logger.fatal( - 'Major error occurred when trying to execute a SQL statement within the sqlThread.' - ' Please tell Atheros about this error message or post it in the forum!' - ' Error occurred while trying to execute statement: "%s" Here are the parameters;' - ' you might want to censor this data with asterisks (***)' - ' as it can contain private information: %s.' - ' Here is the actual error message thrown by the sqlThread: %s', - str(item), - str(repr(parameters)), - str(err)) - logger.fatal('This program shall now abruptly exit!') - - os._exit(0) - - helper_sql.sqlReturnQueue.put((self.cur.fetchall(), rowcount)) - # helper_sql.sqlSubmitQueue.task_done() - - def create_function(self): - # create_function - try: - self.conn.create_function("enaddr", 3, func=encodeAddress, deterministic=True) - except (TypeError, sqlite3.NotSupportedError) as err: - logger.debug( - "Got error while pass deterministic in sqlite create function {}, Passing 3 params".format(err)) - self.conn.create_function("enaddr", 3, encodeAddress) diff --git a/src/tests/mock/pybitmessage/debug.py b/src/tests/mock/pybitmessage/debug.py deleted file mode 100644 index a70cb543..00000000 --- a/src/tests/mock/pybitmessage/debug.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Logging and debuging facility ------------------------------ - -Levels: - - DEBUG - Detailed information, typically of 
interest only when diagnosing problems. - INFO - Confirmation that things are working as expected. - WARNING - An indication that something unexpected happened, or indicative of - some problem in the near future (e.g. 'disk space low'). The software - is still working as expected. - ERROR - Due to a more serious problem, the software has not been able to - perform some function. - CRITICAL - A serious error, indicating that the program itself may be unable to - continue running. - -There are three loggers by default: `console_only`, `file_only` and `both`. -You can configure logging in the logging.dat in the appdata dir. -It's format is described in the :func:`logging.config.fileConfig` doc. - -Use: - ->>> import logging ->>> logger = logging.getLogger('default') - -The old form: ``from debug import logger`` is also may be used, -but only in the top level modules. - -Logging is thread-safe so you don't have to worry about locks, -just import and log. -""" - -import logging -import logging.config -import os -import sys - -from six.moves import configparser - -import helper_startup -import state - -helper_startup.loadConfig() - -# Now can be overriden from a config file, which uses standard python -# logging.config.fileConfig interface -# examples are here: -# https://bitmessage.org/forum/index.php/topic,4820.msg11163.html#msg11163 -log_level = 'WARNING' - - -def log_uncaught_exceptions(ex_cls, ex, tb): - """The last resort logging function used for sys.excepthook""" - logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb)) - - -def configureLogging(): - """ - Configure logging, - using either logging.dat file in the state.appdata dir - or dictionary with hardcoded settings. 
- """ - sys.excepthook = log_uncaught_exceptions - fail_msg = '' - try: - logging_config = os.path.join(state.appdata, 'logging.dat') - logging.config.fileConfig( - logging_config, disable_existing_loggers=False) - return ( - False, - 'Loaded logger configuration from %s' % logging_config - ) - except (OSError, configparser.NoSectionError, KeyError): - if os.path.isfile(logging_config): - fail_msg = \ - 'Failed to load logger configuration from %s, using default' \ - ' logging config\n%s' % \ - (logging_config, sys.exc_info()) - else: - # no need to confuse the user if the logger config - # is missing entirely - fail_msg = 'Using default logger configuration' - - logging_config = { - 'version': 1, - 'formatters': { - 'default': { - 'format': u'%(asctime)s - %(levelname)s - %(message)s', - }, - }, - 'handlers': { - 'console': { - 'class': 'logging.StreamHandler', - 'formatter': 'default', - 'level': log_level, - 'stream': 'ext://sys.stderr' - }, - 'file': { - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'level': log_level, - 'filename': os.path.join(state.appdata, 'debug.log'), - 'maxBytes': 2097152, # 2 MiB - 'backupCount': 1, - 'encoding': 'UTF-8', - } - }, - 'loggers': { - 'console_only': { - 'handlers': ['console'], - 'propagate': 0 - }, - 'file_only': { - 'handlers': ['file'], - 'propagate': 0 - }, - 'both': { - 'handlers': ['console', 'file'], - 'propagate': 0 - }, - }, - 'root': { - 'level': log_level, - 'handlers': ['console'], - }, - } - - logging_config['loggers']['default'] = logging_config['loggers'][ - 'file_only' if '-c' in sys.argv else 'both'] - logging.config.dictConfig(logging_config) - - return True, fail_msg - - -def resetLogging(): - """Reconfigure logging in runtime when state.appdata dir changed""" - # pylint: disable=global-statement, used-before-assignment - global logger - for i in logger.handlers: - logger.removeHandler(i) - i.flush() - i.close() - configureLogging() - logger = logging.getLogger('default') - 
- -# ! - -preconfigured, msg = configureLogging() -logger = logging.getLogger('default') -if msg: - logger.log(logging.WARNING if preconfigured else logging.INFO, msg) diff --git a/src/tests/mock/pybitmessage/defaults.py b/src/tests/mock/pybitmessage/defaults.py deleted file mode 100644 index 32162b56..00000000 --- a/src/tests/mock/pybitmessage/defaults.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -Common default values -""" - -#: sanity check, prevent doing ridiculous PoW -#: 20 million PoWs equals approximately 2 days on dev's dual R9 290 -ridiculousDifficulty = 20000000 - -#: Remember here the RPC port read from namecoin.conf so we can restore to -#: it as default whenever the user changes the "method" selection for -#: namecoin integration to "namecoind". -namecoinDefaultRpcPort = "8336" - -# If changed, these values will cause particularly unexpected behavior: -# You won't be able to either send or receive messages because the proof -# of work you do (or demand) won't match that done or demanded by others. -# Don't change them! -#: The amount of work that should be performed (and demanded) per byte -#: of the payload. -networkDefaultProofOfWorkNonceTrialsPerByte = 1000 -#: To make sending short messages a little more difficult, this value is -#: added to the payload length for use in calculating the proof of work -#: target. -networkDefaultPayloadLengthExtraBytes = 1000 diff --git a/src/tests/mock/pybitmessage/depends.py b/src/tests/mock/pybitmessage/depends.py deleted file mode 100755 index 268137ec..00000000 --- a/src/tests/mock/pybitmessage/depends.py +++ /dev/null @@ -1,450 +0,0 @@ -""" -Utility functions to check the availability of dependencies -and suggest how it may be installed -""" - -import os -import re -import sys - -# Only really old versions of Python don't have sys.hexversion. We don't -# support them. 
The logging module was introduced in Python 2.3 -if not hasattr(sys, 'hexversion') or sys.hexversion < 0x20300F0: - sys.exit( - 'Python version: %s\n' - 'PyBitmessage requires Python 2.7.4 or greater (but not Python 3)' - % sys.version - ) - -import logging # noqa:E402 -import subprocess - -from importlib import import_module - -# We can now use logging so set up a simple configuration -formatter = logging.Formatter('%(levelname)s: %(message)s') -handler = logging.StreamHandler(sys.stdout) -handler.setFormatter(formatter) -logger = logging.getLogger('both') -logger.addHandler(handler) -logger.setLevel(logging.ERROR) - - -OS_RELEASE = { - "Debian GNU/Linux".lower(): "Debian", - "fedora": "Fedora", - "opensuse": "openSUSE", - "ubuntu": "Ubuntu", - "gentoo": "Gentoo", - "calculate": "Gentoo" -} - -PACKAGE_MANAGER = { - "OpenBSD": "pkg_add", - "FreeBSD": "pkg install", - "Debian": "apt-get install", - "Ubuntu": "apt-get install", - "Ubuntu 12": "apt-get install", - "openSUSE": "zypper install", - "Fedora": "dnf install", - "Guix": "guix package -i", - "Gentoo": "emerge" -} - -PACKAGES = { - "PyQt4": { - "OpenBSD": "py-qt4", - "FreeBSD": "py27-qt4", - "Debian": "python-qt4", - "Ubuntu": "python-qt4", - "Ubuntu 12": "python-qt4", - "openSUSE": "python-qt", - "Fedora": "PyQt4", - "Guix": "python2-pyqt@4.11.4", - "Gentoo": "dev-python/PyQt4", - "optional": True, - "description": - "You only need PyQt if you want to use the GUI." - " When only running as a daemon, this can be skipped.\n" - "However, you would have to install it manually" - " because setuptools does not support PyQt." 
- }, - "msgpack": { - "OpenBSD": "py-msgpack", - "FreeBSD": "py27-msgpack-python", - "Debian": "python-msgpack", - "Ubuntu": "python-msgpack", - "Ubuntu 12": "msgpack-python", - "openSUSE": "python-msgpack-python", - "Fedora": "python2-msgpack", - "Guix": "python2-msgpack", - "Gentoo": "dev-python/msgpack", - "optional": True, - "description": - "python-msgpack is recommended for improved performance of" - " message encoding/decoding" - }, - "pyopencl": { - "FreeBSD": "py27-pyopencl", - "Debian": "python-pyopencl", - "Ubuntu": "python-pyopencl", - "Ubuntu 12": "python-pyopencl", - "Fedora": "python2-pyopencl", - "openSUSE": "", - "OpenBSD": "", - "Guix": "", - "Gentoo": "dev-python/pyopencl", - "optional": True, - "description": - "If you install pyopencl, you will be able to use" - " GPU acceleration for proof of work.\n" - "You also need a compatible GPU and drivers." - }, - "setuptools": { - "OpenBSD": "py-setuptools", - "FreeBSD": "py27-setuptools", - "Debian": "python-setuptools", - "Ubuntu": "python-setuptools", - "Ubuntu 12": "python-setuptools", - "Fedora": "python2-setuptools", - "openSUSE": "python-setuptools", - "Guix": "python2-setuptools", - "Gentoo": "dev-python/setuptools", - "optional": False, - } -} - - -def detectOS(): - """Finding out what Operating System is running""" - if detectOS.result is not None: - return detectOS.result - if sys.platform.startswith('openbsd'): - detectOS.result = "OpenBSD" - elif sys.platform.startswith('freebsd'): - detectOS.result = "FreeBSD" - elif sys.platform.startswith('win'): - detectOS.result = "Windows" - elif os.path.isfile("/etc/os-release"): - detectOSRelease() - elif os.path.isfile("/etc/config.scm"): - detectOS.result = "Guix" - return detectOS.result - - -detectOS.result = None - - -def detectOSRelease(): - """Detecting the release of OS""" - with open("/etc/os-release", 'r') as osRelease: - version = None - for line in osRelease: - if line.startswith("NAME="): - detectOS.result = OS_RELEASE.get( - 
line.replace('"', '').split("=")[-1].strip().lower()) - elif line.startswith("VERSION_ID="): - try: - version = float(line.split("=")[1].replace("\"", "")) - except ValueError: - pass - if detectOS.result == "Ubuntu" and version < 14: - detectOS.result = "Ubuntu 12" - - -def try_import(module, log_extra=False): - """Try to import the non imported packages""" - try: - return import_module(module) - except ImportError: - module = module.split('.')[0] - logger.error('The %s module is not available.', module) - if log_extra: - logger.error(log_extra) - dist = detectOS() - logger.error( - 'On %s, try running "%s %s" as root.', - dist, PACKAGE_MANAGER[dist], PACKAGES[module][dist]) - return False - - -def check_ripemd160(): - """Check availability of the RIPEMD160 hash function""" - try: - from fallback import RIPEMD160Hash # pylint: disable=relative-import - except ImportError: - return False - return RIPEMD160Hash is not None - - -def check_sqlite(): - """Do sqlite check. - - Simply check sqlite3 module if exist or not with hexversion - support in python version for specifieed platform. 
- """ - if sys.hexversion < 0x020500F0: - logger.error( - 'The sqlite3 module is not included in this version of Python.') - if sys.platform.startswith('freebsd'): - logger.error( - 'On FreeBSD, try running "pkg install py27-sqlite3" as root.') - return False - - sqlite3 = try_import('sqlite3') - if not sqlite3: - return False - - logger.info('sqlite3 Module Version: %s', sqlite3.version) - logger.info('SQLite Library Version: %s', sqlite3.sqlite_version) - # sqlite_version_number formula: https://sqlite.org/c3ref/c_source_id.html - sqlite_version_number = ( - sqlite3.sqlite_version_info[0] * 1000000 - + sqlite3.sqlite_version_info[1] * 1000 - + sqlite3.sqlite_version_info[2] - ) - - conn = None - try: - try: - conn = sqlite3.connect(':memory:') - if sqlite_version_number >= 3006018: - sqlite_source_id = conn.execute( - 'SELECT sqlite_source_id();' - ).fetchone()[0] - logger.info('SQLite Library Source ID: %s', sqlite_source_id) - if sqlite_version_number >= 3006023: - compile_options = ', '.join( - [row[0] for row in conn.execute('PRAGMA compile_options;')]) - logger.info( - 'SQLite Library Compile Options: %s', compile_options) - # There is no specific version requirement as yet, so we just - # use the first version that was included with Python. - if sqlite_version_number < 3000008: - logger.error( - 'This version of SQLite is too old.' - ' PyBitmessage requires SQLite 3.0.8 or later') - return False - return True - except sqlite3.Error: - logger.exception('An exception occured while checking sqlite.') - return False - finally: - if conn: - conn.close() - - -def check_openssl(): - """Do openssl dependency check. - - Here we are checking for openssl with its all dependent libraries - and version checking. 
- """ - # pylint: disable=too-many-branches, too-many-return-statements - # pylint: disable=protected-access, redefined-outer-name - ctypes = try_import('ctypes') - if not ctypes: - logger.error('Unable to check OpenSSL.') - return False - - # We need to emulate the way PyElliptic searches for OpenSSL. - if sys.platform == 'win32': - paths = ['libeay32.dll'] - if getattr(sys, 'frozen', False): - paths.insert(0, os.path.join(sys._MEIPASS, 'libeay32.dll')) - else: - paths = ['libcrypto.so', 'libcrypto.so.1.0.0'] - if sys.platform == 'darwin': - paths.extend([ - 'libcrypto.dylib', - '/usr/local/opt/openssl/lib/libcrypto.dylib', - './../Frameworks/libcrypto.dylib' - ]) - - if re.match(r'linux|darwin|freebsd', sys.platform): - try: - import ctypes.util - path = ctypes.util.find_library('ssl') - if path not in paths: - paths.append(path) - except: # noqa:E722 - pass - - openssl_version = None - openssl_hexversion = None - openssl_cflags = None - - cflags_regex = re.compile(r'(?:OPENSSL_NO_)(AES|EC|ECDH|ECDSA)(?!\w)') - - import pyelliptic.openssl - - for path in paths: - logger.info('Checking OpenSSL at %s', path) - try: - library = ctypes.CDLL(path) - except OSError: - continue - logger.info('OpenSSL Name: %s', library._name) - try: - openssl_version, openssl_hexversion, openssl_cflags = \ - pyelliptic.openssl.get_version(library) - except AttributeError: # sphinx chokes - return True - if not openssl_version: - logger.error('Cannot determine version of this OpenSSL library.') - return False - logger.info('OpenSSL Version: %s', openssl_version) - logger.info('OpenSSL Compile Options: %s', openssl_cflags) - # PyElliptic uses EVP_CIPHER_CTX_new and EVP_CIPHER_CTX_free which were - # introduced in 0.9.8b. - if openssl_hexversion < 0x90802F: - logger.error( - 'This OpenSSL library is too old. 
PyBitmessage requires' - ' OpenSSL 0.9.8b or later with AES, Elliptic Curves (EC),' - ' ECDH, and ECDSA enabled.') - return False - matches = cflags_regex.findall(openssl_cflags.decode('utf-8', "ignore")) - if matches: - logger.error( - 'This OpenSSL library is missing the following required' - ' features: %s. PyBitmessage requires OpenSSL 0.9.8b' - ' or later with AES, Elliptic Curves (EC), ECDH,' - ' and ECDSA enabled.', ', '.join(matches)) - return False - return True - return False - - -# ..todo:: The minimum versions of pythondialog and dialog need to be determined -def check_curses(): - """Do curses dependency check. - - Here we are checking for curses if available or not with check as interface - requires the `pythondialog `_ package - and the dialog utility. - """ - if sys.hexversion < 0x20600F0: - logger.error( - 'The curses interface requires the pythondialog package and' - ' the dialog utility.') - return False - curses = try_import('curses') - if not curses: - logger.error('The curses interface can not be used.') - return False - - logger.info('curses Module Version: %s', curses.version) - - dialog = try_import('dialog') - if not dialog: - logger.error('The curses interface can not be used.') - return False - - try: - subprocess.check_call(['which', 'dialog']) - except subprocess.CalledProcessError: - logger.error( - 'Curses requires the `dialog` command to be installed as well as' - ' the python library.') - return False - - logger.info('pythondialog Package Version: %s', dialog.__version__) - dialog_util_version = dialog.Dialog().cached_backend_version - # The pythondialog author does not like Python2 str, so we have to use - # unicode for just the version otherwise we get the repr form which - # includes the module and class names along with the actual version. - logger.info('dialog Utility Version %s', dialog_util_version.decode('utf-8')) - return True - - -def check_pyqt(): - """Do pyqt dependency check. 
- - Here we are checking for PyQt4 with its version, as for it require - PyQt 4.8 or later. - """ - QtCore = try_import( - 'PyQt4.QtCore', 'PyBitmessage requires PyQt 4.8 or later and Qt 4.7 or later.') - - if not QtCore: - return False - - logger.info('PyQt Version: %s', QtCore.PYQT_VERSION_STR) - logger.info('Qt Version: %s', QtCore.QT_VERSION_STR) - passed = True - if QtCore.PYQT_VERSION < 0x40800: - logger.error( - 'This version of PyQt is too old. PyBitmessage requries' - ' PyQt 4.8 or later.') - passed = False - if QtCore.QT_VERSION < 0x40700: - logger.error( - 'This version of Qt is too old. PyBitmessage requries' - ' Qt 4.7 or later.') - passed = False - return passed - - -def check_msgpack(): - """Do sgpack module check. - - simply checking if msgpack package with all its dependency - is available or not as recommended for messages coding. - """ - return try_import( - 'msgpack', 'It is highly recommended for messages coding.') is not False - - -def check_dependencies(verbose=False, optional=False): - """Do dependency check. - - It identifies project dependencies and checks if there are - any known, publicly disclosed, vulnerabilities.basically - scan applications (and their dependent libraries) so that - easily identify any known vulnerable components. - """ - if verbose: - logger.setLevel(logging.INFO) - - has_all_dependencies = True - - # Python 2.7.4 is the required minimum. - # (https://bitmessage.org/forum/index.php?topic=4081.0) - # Python 3+ is not supported, but it is still useful to provide - # information about our other requirements. - logger.info('Python version: %s', sys.version) - if sys.hexversion < 0x20704F0: - logger.error( - 'PyBitmessage requires Python 2.7.4 or greater' - ' (but not Python 3+)') - has_all_dependencies = False - if sys.hexversion >= 0x3000000: - logger.error( - 'PyBitmessage does not support Python 3+. Python 2.7.4' - ' or greater is required. 
Python 2.7.18 is recommended.') - sys.exit() - - # FIXME: This needs to be uncommented when more of the code is python3 compatible - # if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3060000: - # print("PyBitmessage requires python >= 3.6 if using python 3") - - check_functions = [check_ripemd160, check_sqlite, check_openssl] - if optional: - check_functions.extend([check_msgpack, check_pyqt, check_curses]) - - # Unexpected exceptions are handled here - for check in check_functions: - try: - has_all_dependencies &= check() - except: # noqa:E722 - logger.exception('%s failed unexpectedly.', check.__name__) - has_all_dependencies = False - - if not has_all_dependencies: - sys.exit( - 'PyBitmessage cannot start. One or more dependencies are' - ' unavailable.' - ) - - -logger.setLevel(0) diff --git a/src/tests/mock/pybitmessage/fallback/__init__.py b/src/tests/mock/pybitmessage/fallback/__init__.py deleted file mode 100644 index 9a8d646f..00000000 --- a/src/tests/mock/pybitmessage/fallback/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Fallback expressions help PyBitmessage modules to run without some external -dependencies. - - -RIPEMD160Hash -------------- - -We need to check :mod:`hashlib` for RIPEMD-160, as it won't be available -if OpenSSL is not linked against or the linked OpenSSL has RIPEMD disabled. -Try to use `pycryptodome `_ -in that case. 
-""" - -import hashlib - -try: - hashlib.new('ripemd160') -except ValueError: - try: - from Crypto.Hash import RIPEMD - except ImportError: - RIPEMD160Hash = None - else: - RIPEMD160Hash = RIPEMD.RIPEMD160Hash -else: - def RIPEMD160Hash(data=None): - """hashlib based RIPEMD160Hash""" - hasher = hashlib.new('ripemd160') - if data: - hasher.update(data) - return hasher diff --git a/src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py b/src/tests/mock/pybitmessage/fallback/umsgpack/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py b/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py deleted file mode 100644 index 34938614..00000000 --- a/src/tests/mock/pybitmessage/fallback/umsgpack/umsgpack.py +++ /dev/null @@ -1,1067 +0,0 @@ -# u-msgpack-python v2.4.1 - v at sergeev.io -# https://github.com/vsergeev/u-msgpack-python -# -# u-msgpack-python is a lightweight MessagePack serializer and deserializer -# module, compatible with both Python 2 and 3, as well CPython and PyPy -# implementations of Python. u-msgpack-python is fully compliant with the -# latest MessagePack specification.com/msgpack/msgpack/blob/master/spec.md). In -# particular, it supports the new binary, UTF-8 string, and application ext -# types. -# -# MIT License -# -# Copyright (c) 2013-2016 vsergeev / Ivan (Vanya) A. Sergeev -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -""" -src/fallback/umsgpack/umsgpack.py -================================= - -u-msgpack-python v2.4.1 - v at sergeev.io -https://github.com/vsergeev/u-msgpack-python - -u-msgpack-python is a lightweight MessagePack serializer and deserializer -module, compatible with both Python 2 and 3, as well CPython and PyPy -implementations of Python. u-msgpack-python is fully compliant with the -latest MessagePack specification.com/msgpack/msgpack/blob/master/spec.md). In -particular, it supports the new binary, UTF-8 string, and application ext -types. - -License: MIT -""" -# pylint: disable=too-many-lines,too-many-branches,too-many-statements,global-statement,too-many-return-statements -# pylint: disable=unused-argument - -import collections -import io -import struct -import sys - -__version__ = "2.4.1" -"Module version string" - -version = (2, 4, 1) -"Module version tuple" - - -############################################################################## -# Ext Class -############################################################################## - -# Extension type for application-defined types and data -class Ext: # pylint: disable=old-style-class - """ - The Ext class facilitates creating a serializable extension object to store - an application-defined type and data byte array. - """ - - def __init__(self, type, data): - """ - Construct a new Ext object. 
- - Args: - type: application-defined type integer from 0 to 127 - data: application-defined data byte array - - Raises: - TypeError: - Specified ext type is outside of 0 to 127 range. - - Example: - >>> foo = umsgpack.Ext(0x05, b"\x01\x02\x03") - >>> umsgpack.packb({u"special stuff": foo, u"awesome": True}) - '\x82\xa7awesome\xc3\xadspecial stuff\xc7\x03\x05\x01\x02\x03' - >>> bar = umsgpack.unpackb(_) - >>> print(bar["special stuff"]) - Ext Object (Type: 0x05, Data: 01 02 03) - >>> - """ - # pylint:disable=redefined-builtin - - # Application ext type should be 0 <= type <= 127 - if not isinstance(type, int) or not (type >= 0 and type <= 127): - raise TypeError("ext type out of range") - # Check data is type bytes - elif sys.version_info[0] == 3 and not isinstance(data, bytes): - raise TypeError("ext data is not type \'bytes\'") - elif sys.version_info[0] == 2 and not isinstance(data, str): - raise TypeError("ext data is not type \'str\'") - self.type = type - self.data = data - - def __eq__(self, other): - """ - Compare this Ext object with another for equality. - """ - return (isinstance(other, self.__class__) and - self.type == other.type and - self.data == other.data) - - def __ne__(self, other): - """ - Compare this Ext object with another for inequality. - """ - return not self.__eq__(other) - - def __str__(self): - """ - String representation of this Ext object. - """ - s = "Ext Object (Type: 0x%02x, Data: " % self.type - s += " ".join(["0x%02x" % ord(self.data[i:i + 1]) - for i in xrange(min(len(self.data), 8))]) - if len(self.data) > 8: - s += " ..." - s += ")" - return s - - def __hash__(self): - """ - Provide a hash of this Ext object. 
- """ - return hash((self.type, self.data)) - - -class InvalidString(bytes): - """Subclass of bytes to hold invalid UTF-8 strings.""" - pass - -############################################################################## -# Exceptions -############################################################################## - - -# Base Exception classes -class PackException(Exception): - "Base class for exceptions encountered during packing." - pass - - -class UnpackException(Exception): - "Base class for exceptions encountered during unpacking." - pass - - -# Packing error -class UnsupportedTypeException(PackException): - "Object type not supported for packing." - pass - - -# Unpacking error -class InsufficientDataException(UnpackException): - "Insufficient data to unpack the serialized object." - pass - - -class InvalidStringException(UnpackException): - "Invalid UTF-8 string encountered during unpacking." - pass - - -class ReservedCodeException(UnpackException): - "Reserved code encountered during unpacking." - pass - - -class UnhashableKeyException(UnpackException): - """ - Unhashable key encountered during map unpacking. - The serialized map cannot be deserialized into a Python dictionary. - """ - pass - - -class DuplicateKeyException(UnpackException): - "Duplicate key encountered during map unpacking." - pass - - -# Backwards compatibility -KeyNotPrimitiveException = UnhashableKeyException -KeyDuplicateException = DuplicateKeyException - -############################################################################# -# Exported Functions and Glob -############################################################################# - -# Exported functions and variables, set up in __init() -pack = None -packb = None -unpack = None -unpackb = None -dump = None -dumps = None -load = None -loads = None - -compatibility = False -u""" -Compatibility mode boolean. 
- -When compatibility mode is enabled, u-msgpack-python will serialize both -unicode strings and bytes into the old "raw" msgpack type, and deserialize the -"raw" msgpack type into bytes. This provides backwards compatibility with the -old MessagePack specification. - -Example: ->>> umsgpack.compatibility = True ->>> ->>> umsgpack.packb([u"some string", b"some bytes"]) -b'\x92\xabsome string\xaasome bytes' ->>> umsgpack.unpackb(_) -[b'some string', b'some bytes'] ->>> -""" - -############################################################################## -# Packing -############################################################################## - -# You may notice struct.pack("B", obj) instead of the simpler chr(obj) in the -# code below. This is to allow for seamless Python 2 and 3 compatibility, as -# chr(obj) has a str return type instead of bytes in Python 3, and -# struct.pack(...) has the right return type in both versions. - - -def _pack_integer(obj, fp, options): - if obj < 0: - if obj >= -32: - fp.write(struct.pack("b", obj)) - elif obj >= -2**(8 - 1): - fp.write(b"\xd0" + struct.pack("b", obj)) - elif obj >= -2**(16 - 1): - fp.write(b"\xd1" + struct.pack(">h", obj)) - elif obj >= -2**(32 - 1): - fp.write(b"\xd2" + struct.pack(">i", obj)) - elif obj >= -2**(64 - 1): - fp.write(b"\xd3" + struct.pack(">q", obj)) - else: - raise UnsupportedTypeException("huge signed int") - else: - if obj <= 127: - fp.write(struct.pack("B", obj)) - elif obj <= 2**8 - 1: - fp.write(b"\xcc" + struct.pack("B", obj)) - elif obj <= 2**16 - 1: - fp.write(b"\xcd" + struct.pack(">H", obj)) - elif obj <= 2**32 - 1: - fp.write(b"\xce" + struct.pack(">I", obj)) - elif obj <= 2**64 - 1: - fp.write(b"\xcf" + struct.pack(">Q", obj)) - else: - raise UnsupportedTypeException("huge unsigned int") - - -def _pack_nil(obj, fp, options): - fp.write(b"\xc0") - - -def _pack_boolean(obj, fp, options): - fp.write(b"\xc3" if obj else b"\xc2") - - -def _pack_float(obj, fp, options): - float_precision = 
options.get('force_float_precision', _float_precision) - - if float_precision == "double": - fp.write(b"\xcb" + struct.pack(">d", obj)) - elif float_precision == "single": - fp.write(b"\xca" + struct.pack(">f", obj)) - else: - raise ValueError("invalid float precision") - - -def _pack_string(obj, fp, options): - obj = obj.encode('utf-8') - if len(obj) <= 31: - fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) - elif len(obj) <= 2**8 - 1: - fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj) - elif len(obj) <= 2**16 - 1: - fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj) - elif len(obj) <= 2**32 - 1: - fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj) - else: - raise UnsupportedTypeException("huge string") - - -def _pack_binary(obj, fp, options): - if len(obj) <= 2**8 - 1: - fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj) - elif len(obj) <= 2**16 - 1: - fp.write(b"\xc5" + struct.pack(">H", len(obj)) + obj) - elif len(obj) <= 2**32 - 1: - fp.write(b"\xc6" + struct.pack(">I", len(obj)) + obj) - else: - raise UnsupportedTypeException("huge binary string") - - -def _pack_oldspec_raw(obj, fp, options): - if len(obj) <= 31: - fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) - elif len(obj) <= 2**16 - 1: - fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj) - elif len(obj) <= 2**32 - 1: - fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj) - else: - raise UnsupportedTypeException("huge raw string") - - -def _pack_ext(obj, fp, options): - if len(obj.data) == 1: - fp.write(b"\xd4" + struct.pack("B", obj.type & 0xff) + obj.data) - elif len(obj.data) == 2: - fp.write(b"\xd5" + struct.pack("B", obj.type & 0xff) + obj.data) - elif len(obj.data) == 4: - fp.write(b"\xd6" + struct.pack("B", obj.type & 0xff) + obj.data) - elif len(obj.data) == 8: - fp.write(b"\xd7" + struct.pack("B", obj.type & 0xff) + obj.data) - elif len(obj.data) == 16: - fp.write(b"\xd8" + struct.pack("B", obj.type & 0xff) + obj.data) - elif len(obj.data) <= 2**8 - 1: - fp.write(b"\xc7" + - 
struct.pack("BB", len(obj.data), obj.type & 0xff) + obj.data) - elif len(obj.data) <= 2**16 - 1: - fp.write(b"\xc8" + - struct.pack(">HB", len(obj.data), obj.type & 0xff) + obj.data) - elif len(obj.data) <= 2**32 - 1: - fp.write(b"\xc9" + - struct.pack(">IB", len(obj.data), obj.type & 0xff) + obj.data) - else: - raise UnsupportedTypeException("huge ext data") - - -def _pack_array(obj, fp, options): - if len(obj) <= 15: - fp.write(struct.pack("B", 0x90 | len(obj))) - elif len(obj) <= 2**16 - 1: - fp.write(b"\xdc" + struct.pack(">H", len(obj))) - elif len(obj) <= 2**32 - 1: - fp.write(b"\xdd" + struct.pack(">I", len(obj))) - else: - raise UnsupportedTypeException("huge array") - - for e in obj: - pack(e, fp, **options) - - -def _pack_map(obj, fp, options): - if len(obj) <= 15: - fp.write(struct.pack("B", 0x80 | len(obj))) - elif len(obj) <= 2**16 - 1: - fp.write(b"\xde" + struct.pack(">H", len(obj))) - elif len(obj) <= 2**32 - 1: - fp.write(b"\xdf" + struct.pack(">I", len(obj))) - else: - raise UnsupportedTypeException("huge array") - - for k, v in obj.items(): - pack(k, fp, **options) - pack(v, fp, **options) - -######################################## - - -# Pack for Python 2, with 'unicode' type, 'str' type, and 'long' type -def _pack2(obj, fp, **options): - """ - Serialize a Python object into MessagePack bytes. - - Args: - obj: a Python object - fp: a .write()-supporting file-like object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping a custom type - to a callable that packs an instance of the type - into an Ext object - force_float_precision (str): "single" to force packing floats as - IEEE-754 single-precision floats, - "double" to force packing floats as - IEEE-754 double-precision floats. - - Returns: - None. - - Raises: - UnsupportedType(PackException): - Object type not supported for packing. 
- - Example: - >>> f = open('test.bin', 'wb') - >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) - >>> - """ - global compatibility - - ext_handlers = options.get("ext_handlers") - - if obj is None: - _pack_nil(obj, fp, options) - elif ext_handlers and obj.__class__ in ext_handlers: - _pack_ext(ext_handlers[obj.__class__](obj), fp, options) - elif isinstance(obj, bool): - _pack_boolean(obj, fp, options) - elif isinstance(obj, (int, long)): - _pack_integer(obj, fp, options) - elif isinstance(obj, float): - _pack_float(obj, fp, options) - elif compatibility and isinstance(obj, unicode): - _pack_oldspec_raw(bytes(obj), fp, options) - elif compatibility and isinstance(obj, bytes): - _pack_oldspec_raw(obj, fp, options) - elif isinstance(obj, unicode): - _pack_string(obj, fp, options) - elif isinstance(obj, str): - _pack_binary(obj, fp, options) - elif isinstance(obj, (list, tuple)): - _pack_array(obj, fp, options) - elif isinstance(obj, dict): - _pack_map(obj, fp, options) - elif isinstance(obj, Ext): - _pack_ext(obj, fp, options) - elif ext_handlers: - # Linear search for superclass - t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) - if t: - _pack_ext(ext_handlers[t](obj), fp, options) - else: - raise UnsupportedTypeException( - "unsupported type: %s" % str(type(obj))) - else: - raise UnsupportedTypeException("unsupported type: %s" % str(type(obj))) - - -# Pack for Python 3, with unicode 'str' type, 'bytes' type, and no 'long' type -def _pack3(obj, fp, **options): - """ - Serialize a Python object into MessagePack bytes. - - Args: - obj: a Python object - fp: a .write()-supporting file-like object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping a custom type - to a callable that packs an instance of the type - into an Ext object - force_float_precision (str): "single" to force packing floats as - IEEE-754 single-precision floats, - "double" to force packing floats as - IEEE-754 double-precision floats. 
- - Returns: - None. - - Raises: - UnsupportedType(PackException): - Object type not supported for packing. - - Example: - >>> f = open('test.bin', 'wb') - >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) - >>> - """ - global compatibility - - ext_handlers = options.get("ext_handlers") - - if obj is None: - _pack_nil(obj, fp, options) - elif ext_handlers and obj.__class__ in ext_handlers: - _pack_ext(ext_handlers[obj.__class__](obj), fp, options) - elif isinstance(obj, bool): - _pack_boolean(obj, fp, options) - elif isinstance(obj, int): - _pack_integer(obj, fp, options) - elif isinstance(obj, float): - _pack_float(obj, fp, options) - elif compatibility and isinstance(obj, str): - _pack_oldspec_raw(obj.encode('utf-8'), fp, options) - elif compatibility and isinstance(obj, bytes): - _pack_oldspec_raw(obj, fp, options) - elif isinstance(obj, str): - _pack_string(obj, fp, options) - elif isinstance(obj, bytes): - _pack_binary(obj, fp, options) - elif isinstance(obj, (list, tuple)): - _pack_array(obj, fp, options) - elif isinstance(obj, dict): - _pack_map(obj, fp, options) - elif isinstance(obj, Ext): - _pack_ext(obj, fp, options) - elif ext_handlers: - # Linear search for superclass - t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) - if t: - _pack_ext(ext_handlers[t](obj), fp, options) - else: - raise UnsupportedTypeException( - "unsupported type: %s" % str(type(obj))) - else: - raise UnsupportedTypeException( - "unsupported type: %s" % str(type(obj))) - - -def _packb2(obj, **options): - """ - Serialize a Python object into MessagePack bytes. - - Args: - obj: a Python object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping a custom type - to a callable that packs an instance of the type - into an Ext object - force_float_precision (str): "single" to force packing floats as - IEEE-754 single-precision floats, - "double" to force packing floats as - IEEE-754 double-precision floats. 
- - Returns: - A 'str' containing serialized MessagePack bytes. - - Raises: - UnsupportedType(PackException): - Object type not supported for packing. - - Example: - >>> umsgpack.packb({u"compact": True, u"schema": 0}) - '\x82\xa7compact\xc3\xa6schema\x00' - >>> - """ - fp = io.BytesIO() - _pack2(obj, fp, **options) - return fp.getvalue() - - -def _packb3(obj, **options): - """ - Serialize a Python object into MessagePack bytes. - - Args: - obj: a Python object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping a custom type - to a callable that packs an instance of the type - into an Ext object - force_float_precision (str): "single" to force packing floats as - IEEE-754 single-precision floats, - "double" to force packing floats as - IEEE-754 double-precision floats. - - Returns: - A 'bytes' containing serialized MessagePack bytes. - - Raises: - UnsupportedType(PackException): - Object type not supported for packing. - - Example: - >>> umsgpack.packb({u"compact": True, u"schema": 0}) - b'\x82\xa7compact\xc3\xa6schema\x00' - >>> - """ - fp = io.BytesIO() - _pack3(obj, fp, **options) - return fp.getvalue() - -############################################################################# -# Unpacking -############################################################################# - - -def _read_except(fp, n): - data = fp.read(n) - if len(data) < n: - raise InsufficientDataException() - return data - - -def _unpack_integer(code, fp, options): - if (ord(code) & 0xe0) == 0xe0: - return struct.unpack("b", code)[0] - elif code == b'\xd0': - return struct.unpack("b", _read_except(fp, 1))[0] - elif code == b'\xd1': - return struct.unpack(">h", _read_except(fp, 2))[0] - elif code == b'\xd2': - return struct.unpack(">i", _read_except(fp, 4))[0] - elif code == b'\xd3': - return struct.unpack(">q", _read_except(fp, 8))[0] - elif (ord(code) & 0x80) == 0x00: - return struct.unpack("B", code)[0] - elif code == b'\xcc': - return struct.unpack("B", _read_except(fp, 
1))[0] - elif code == b'\xcd': - return struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xce': - return struct.unpack(">I", _read_except(fp, 4))[0] - elif code == b'\xcf': - return struct.unpack(">Q", _read_except(fp, 8))[0] - raise Exception("logic error, not int: 0x%02x" % ord(code)) - - -def _unpack_reserved(code, fp, options): - if code == b'\xc1': - raise ReservedCodeException( - "encountered reserved code: 0x%02x" % ord(code)) - raise Exception( - "logic error, not reserved code: 0x%02x" % ord(code)) - - -def _unpack_nil(code, fp, options): - if code == b'\xc0': - return None - raise Exception("logic error, not nil: 0x%02x" % ord(code)) - - -def _unpack_boolean(code, fp, options): - if code == b'\xc2': - return False - elif code == b'\xc3': - return True - raise Exception("logic error, not boolean: 0x%02x" % ord(code)) - - -def _unpack_float(code, fp, options): - if code == b'\xca': - return struct.unpack(">f", _read_except(fp, 4))[0] - elif code == b'\xcb': - return struct.unpack(">d", _read_except(fp, 8))[0] - raise Exception("logic error, not float: 0x%02x" % ord(code)) - - -def _unpack_string(code, fp, options): - if (ord(code) & 0xe0) == 0xa0: - length = ord(code) & ~0xe0 - elif code == b'\xd9': - length = struct.unpack("B", _read_except(fp, 1))[0] - elif code == b'\xda': - length = struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xdb': - length = struct.unpack(">I", _read_except(fp, 4))[0] - else: - raise Exception("logic error, not string: 0x%02x" % ord(code)) - - # Always return raw bytes in compatibility mode - global compatibility - if compatibility: - return _read_except(fp, length) - - data = _read_except(fp, length) - try: - return bytes.decode(data, 'utf-8') - except UnicodeDecodeError: - if options.get("allow_invalid_utf8"): - return InvalidString(data) - raise InvalidStringException("unpacked string is invalid utf-8") - - -def _unpack_binary(code, fp, options): - if code == b'\xc4': - length = struct.unpack("B", 
_read_except(fp, 1))[0] - elif code == b'\xc5': - length = struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xc6': - length = struct.unpack(">I", _read_except(fp, 4))[0] - else: - raise Exception("logic error, not binary: 0x%02x" % ord(code)) - - return _read_except(fp, length) - - -def _unpack_ext(code, fp, options): - if code == b'\xd4': - length = 1 - elif code == b'\xd5': - length = 2 - elif code == b'\xd6': - length = 4 - elif code == b'\xd7': - length = 8 - elif code == b'\xd8': - length = 16 - elif code == b'\xc7': - length = struct.unpack("B", _read_except(fp, 1))[0] - elif code == b'\xc8': - length = struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xc9': - length = struct.unpack(">I", _read_except(fp, 4))[0] - else: - raise Exception("logic error, not ext: 0x%02x" % ord(code)) - - ext = Ext(ord(_read_except(fp, 1)), _read_except(fp, length)) - - # Unpack with ext handler, if we have one - ext_handlers = options.get("ext_handlers") - if ext_handlers and ext.type in ext_handlers: - ext = ext_handlers[ext.type](ext) - - return ext - - -def _unpack_array(code, fp, options): - if (ord(code) & 0xf0) == 0x90: - length = (ord(code) & ~0xf0) - elif code == b'\xdc': - length = struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xdd': - length = struct.unpack(">I", _read_except(fp, 4))[0] - else: - raise Exception("logic error, not array: 0x%02x" % ord(code)) - - return [_unpack(fp, options) for _ in xrange(length)] - - -def _deep_list_to_tuple(obj): - if isinstance(obj, list): - return tuple([_deep_list_to_tuple(e) for e in obj]) - return obj - - -def _unpack_map(code, fp, options): - if (ord(code) & 0xf0) == 0x80: - length = (ord(code) & ~0xf0) - elif code == b'\xde': - length = struct.unpack(">H", _read_except(fp, 2))[0] - elif code == b'\xdf': - length = struct.unpack(">I", _read_except(fp, 4))[0] - else: - raise Exception("logic error, not map: 0x%02x" % ord(code)) - - d = {} if not options.get('use_ordered_dict') \ - else 
collections.OrderedDict() - for _ in xrange(length): - # Unpack key - k = _unpack(fp, options) - - if isinstance(k, list): - # Attempt to convert list into a hashable tuple - k = _deep_list_to_tuple(k) - elif not isinstance(k, collections.Hashable): - raise UnhashableKeyException( - "encountered unhashable key: %s, %s" % (str(k), str(type(k)))) - elif k in d: - raise DuplicateKeyException( - "encountered duplicate key: %s, %s" % (str(k), str(type(k)))) - - # Unpack value - v = _unpack(fp, options) - - try: - d[k] = v - except TypeError: - raise UnhashableKeyException( - "encountered unhashable key: %s" % str(k)) - return d - - -def _unpack(fp, options): - code = _read_except(fp, 1) - return _unpack_dispatch_table[code](code, fp, options) - -######################################## - - -def _unpack2(fp, **options): - """ - Deserialize MessagePack bytes into a Python object. - - Args: - fp: a .read()-supporting file-like object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext - type to a callable that unpacks an instance of - Ext into an object - use_ordered_dict (bool): unpack maps into OrderedDict, instead of - unordered dict (default False) - allow_invalid_utf8 (bool): unpack invalid strings into instances of - InvalidString, for access to the bytes - (default False) - - Returns: - A Python object. - - Raises: - InsufficientDataException(UnpackException): - Insufficient data to unpack the serialized object. - InvalidStringException(UnpackException): - Invalid UTF-8 string encountered during unpacking. - ReservedCodeException(UnpackException): - Reserved code encountered during unpacking. - UnhashableKeyException(UnpackException): - Unhashable key encountered during map unpacking. - The serialized map cannot be deserialized into a Python dictionary. - DuplicateKeyException(UnpackException): - Duplicate key encountered during map unpacking. 
- - Example: - >>> f = open('test.bin', 'rb') - >>> umsgpack.unpackb(f) - {u'compact': True, u'schema': 0} - >>> - """ - return _unpack(fp, options) - - -def _unpack3(fp, **options): - """ - Deserialize MessagePack bytes into a Python object. - - Args: - fp: a .read()-supporting file-like object - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext - type to a callable that unpacks an instance of - Ext into an object - use_ordered_dict (bool): unpack maps into OrderedDict, instead of - unordered dict (default False) - allow_invalid_utf8 (bool): unpack invalid strings into instances of - InvalidString, for access to the bytes - (default False) - - Returns: - A Python object. - - Raises: - InsufficientDataException(UnpackException): - Insufficient data to unpack the serialized object. - InvalidStringException(UnpackException): - Invalid UTF-8 string encountered during unpacking. - ReservedCodeException(UnpackException): - Reserved code encountered during unpacking. - UnhashableKeyException(UnpackException): - Unhashable key encountered during map unpacking. - The serialized map cannot be deserialized into a Python dictionary. - DuplicateKeyException(UnpackException): - Duplicate key encountered during map unpacking. - - Example: - >>> f = open('test.bin', 'rb') - >>> umsgpack.unpackb(f) - {'compact': True, 'schema': 0} - >>> - """ - return _unpack(fp, options) - - -# For Python 2, expects a str object -def _unpackb2(s, **options): - """ - Deserialize MessagePack bytes into a Python object. 
- - Args: - s: a 'str' or 'bytearray' containing serialized MessagePack bytes - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext - type to a callable that unpacks an instance of - Ext into an object - use_ordered_dict (bool): unpack maps into OrderedDict, instead of - unordered dict (default False) - allow_invalid_utf8 (bool): unpack invalid strings into instances of - InvalidString, for access to the bytes - (default False) - - Returns: - A Python object. - - Raises: - TypeError: - Packed data type is neither 'str' nor 'bytearray'. - InsufficientDataException(UnpackException): - Insufficient data to unpack the serialized object. - InvalidStringException(UnpackException): - Invalid UTF-8 string encountered during unpacking. - ReservedCodeException(UnpackException): - Reserved code encountered during unpacking. - UnhashableKeyException(UnpackException): - Unhashable key encountered during map unpacking. - The serialized map cannot be deserialized into a Python dictionary. - DuplicateKeyException(UnpackException): - Duplicate key encountered during map unpacking. - - Example: - >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00') - {u'compact': True, u'schema': 0} - >>> - """ - if not isinstance(s, (str, bytearray)): - raise TypeError("packed data must be type 'str' or 'bytearray'") - return _unpack(io.BytesIO(s), options) - - -# For Python 3, expects a bytes object -def _unpackb3(s, **options): - """ - Deserialize MessagePack bytes into a Python object. 
- - Args: - s: a 'bytes' or 'bytearray' containing serialized MessagePack bytes - - Kwargs: - ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext - type to a callable that unpacks an instance of - Ext into an object - use_ordered_dict (bool): unpack maps into OrderedDict, instead of - unordered dict (default False) - allow_invalid_utf8 (bool): unpack invalid strings into instances of - InvalidString, for access to the bytes - (default False) - - Returns: - A Python object. - - Raises: - TypeError: - Packed data type is neither 'bytes' nor 'bytearray'. - InsufficientDataException(UnpackException): - Insufficient data to unpack the serialized object. - InvalidStringException(UnpackException): - Invalid UTF-8 string encountered during unpacking. - ReservedCodeException(UnpackException): - Reserved code encountered during unpacking. - UnhashableKeyException(UnpackException): - Unhashable key encountered during map unpacking. - The serialized map cannot be deserialized into a Python dictionary. - DuplicateKeyException(UnpackException): - Duplicate key encountered during map unpacking. 
- - Example: - >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00') - {'compact': True, 'schema': 0} - >>> - """ - if not isinstance(s, (bytes, bytearray)): - raise TypeError("packed data must be type 'bytes' or 'bytearray'") - return _unpack(io.BytesIO(s), options) - -############################################################################# -# Module Initialization -############################################################################# - - -def __init(): - # pylint: disable=global-variable-undefined - - global pack - global packb - global unpack - global unpackb - global dump - global dumps - global load - global loads - global compatibility - global _float_precision - global _unpack_dispatch_table - global xrange - - # Compatibility mode for handling strings/bytes with the old specification - compatibility = False - - # Auto-detect system float precision - if sys.float_info.mant_dig == 53: - _float_precision = "double" - else: - _float_precision = "single" - - # Map packb and unpackb to the appropriate version - if sys.version_info[0] == 3: - pack = _pack3 - packb = _packb3 - dump = _pack3 - dumps = _packb3 - unpack = _unpack3 - unpackb = _unpackb3 - load = _unpack3 - loads = _unpackb3 - xrange = range # pylint: disable=redefined-builtin - else: - pack = _pack2 - packb = _packb2 - dump = _pack2 - dumps = _packb2 - unpack = _unpack2 - unpackb = _unpackb2 - load = _unpack2 - loads = _unpackb2 - - # Build a dispatch table for fast lookup of unpacking function - - _unpack_dispatch_table = {} - # Fix uint - for code in range(0, 0x7f + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer - # Fix map - for code in range(0x80, 0x8f + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_map - # Fix array - for code in range(0x90, 0x9f + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_array - # Fix str - for code in range(0xa0, 0xbf + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string - # Nil - 
_unpack_dispatch_table[b'\xc0'] = _unpack_nil - # Reserved - _unpack_dispatch_table[b'\xc1'] = _unpack_reserved - # Boolean - _unpack_dispatch_table[b'\xc2'] = _unpack_boolean - _unpack_dispatch_table[b'\xc3'] = _unpack_boolean - # Bin - for code in range(0xc4, 0xc6 + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_binary - # Ext - for code in range(0xc7, 0xc9 + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext - # Float - _unpack_dispatch_table[b'\xca'] = _unpack_float - _unpack_dispatch_table[b'\xcb'] = _unpack_float - # Uint - for code in range(0xcc, 0xcf + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer - # Int - for code in range(0xd0, 0xd3 + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer - # Fixext - for code in range(0xd4, 0xd8 + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext - # String - for code in range(0xd9, 0xdb + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string - # Array - _unpack_dispatch_table[b'\xdc'] = _unpack_array - _unpack_dispatch_table[b'\xdd'] = _unpack_array - # Map - _unpack_dispatch_table[b'\xde'] = _unpack_map - _unpack_dispatch_table[b'\xdf'] = _unpack_map - # Negative fixint - for code in range(0xe0, 0xff + 1): - _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer - - -__init() diff --git a/src/tests/mock/pybitmessage/helper_ackPayload.py b/src/tests/mock/pybitmessage/helper_ackPayload.py deleted file mode 100644 index d30f4c0d..00000000 --- a/src/tests/mock/pybitmessage/helper_ackPayload.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -This module is for generating ack payload -""" - -from binascii import hexlify -from struct import pack - -import helper_random -import highlevelcrypto -from addresses import encodeVarint - - -def genAckPayload(streamNumber=1, stealthLevel=0): - """ - Generate and return payload obj. 
- - This function generates payload objects for message acknowledgements - Several stealth levels are available depending on the privacy needs; - a higher level means better stealth, but also higher cost (size+POW) - - - level 0: a random 32-byte sequence with a message header appended - - level 1: a getpubkey request for a (random) dummy key hash - - level 2: a standard message, encrypted to a random pubkey - """ - if stealthLevel == 2: # Generate privacy-enhanced payload - # Generate a dummy privkey and derive the pubkey - dummyPubKeyHex = highlevelcrypto.privToPub( - hexlify(helper_random.randomBytes(32))) - # Generate a dummy message of random length - # (the smallest possible standard-formatted message is 234 bytes) - dummyMessage = helper_random.randomBytes( - helper_random.randomrandrange(234, 801)) - # Encrypt the message using standard BM encryption (ECIES) - ackdata = highlevelcrypto.encrypt(dummyMessage, dummyPubKeyHex) - acktype = 2 # message - version = 1 - - elif stealthLevel == 1: # Basic privacy payload (random getpubkey) - ackdata = helper_random.randomBytes(32) - acktype = 0 # getpubkey - version = 4 - - else: # Minimum viable payload (non stealth) - ackdata = helper_random.randomBytes(32) - acktype = 2 # message - version = 1 - - ackobject = pack('>I', acktype) + encodeVarint( - version) + encodeVarint(streamNumber) + ackdata - - return ackobject diff --git a/src/tests/mock/pybitmessage/helper_addressbook.py b/src/tests/mock/pybitmessage/helper_addressbook.py deleted file mode 100644 index fb572150..00000000 --- a/src/tests/mock/pybitmessage/helper_addressbook.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Insert value into addressbook -""" - -from bmconfigparser import BMConfigParser -from helper_sql import sqlExecute - - -def insert(address, label): - """perform insert into addressbook""" - - if address not in BMConfigParser().addresses(): - return sqlExecute('''INSERT INTO addressbook VALUES (?,?)''', label, address) == 1 - return False diff --git 
a/src/tests/mock/pybitmessage/helper_bitcoin.py b/src/tests/mock/pybitmessage/helper_bitcoin.py deleted file mode 100644 index d4f1d105..00000000 --- a/src/tests/mock/pybitmessage/helper_bitcoin.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Calculates bitcoin and testnet address from pubkey -""" - -import hashlib - -from debug import logger -from pyelliptic import arithmetic - - -def calculateBitcoinAddressFromPubkey(pubkey): - """Calculate bitcoin address from given pubkey (65 bytes long hex string)""" - if len(pubkey) != 65: - logger.error('Could not calculate Bitcoin address from pubkey because' - ' function was passed a pubkey that was' - ' %i bytes long rather than 65.', len(pubkey)) - return "error" - ripe = hashlib.new('ripemd160') - sha = hashlib.new('sha256') - sha.update(pubkey) - ripe.update(sha.digest()) - ripeWithProdnetPrefix = '\x00' + ripe.digest() - - checksum = hashlib.sha256(hashlib.sha256( - ripeWithProdnetPrefix).digest()).digest()[:4] - binaryBitcoinAddress = ripeWithProdnetPrefix + checksum - numberOfZeroBytesOnBinaryBitcoinAddress = 0 - while binaryBitcoinAddress[0] == '\x00': - numberOfZeroBytesOnBinaryBitcoinAddress += 1 - binaryBitcoinAddress = binaryBitcoinAddress[1:] - base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58) - return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded - - -def calculateTestnetAddressFromPubkey(pubkey): - """This function expects that pubkey begin with the testnet prefix""" - if len(pubkey) != 65: - logger.error('Could not calculate Bitcoin address from pubkey because' - ' function was passed a pubkey that was' - ' %i bytes long rather than 65.', len(pubkey)) - return "error" - ripe = hashlib.new('ripemd160') - sha = hashlib.new('sha256') - sha.update(pubkey) - ripe.update(sha.digest()) - ripeWithProdnetPrefix = '\x6F' + ripe.digest() - - checksum = hashlib.sha256(hashlib.sha256( - ripeWithProdnetPrefix).digest()).digest()[:4] - binaryBitcoinAddress = ripeWithProdnetPrefix + checksum - 
numberOfZeroBytesOnBinaryBitcoinAddress = 0 - while binaryBitcoinAddress[0] == '\x00': - numberOfZeroBytesOnBinaryBitcoinAddress += 1 - binaryBitcoinAddress = binaryBitcoinAddress[1:] - base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58) - return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded diff --git a/src/tests/mock/pybitmessage/helper_inbox.py b/src/tests/mock/pybitmessage/helper_inbox.py deleted file mode 100644 index d99e9544..00000000 --- a/src/tests/mock/pybitmessage/helper_inbox.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Helper Inbox performs inbox messages related operations""" - -import queues -from helper_sql import sqlExecute, sqlQuery - - -def insert(t): - """Perform an insert into the "inbox" table""" - sqlExecute('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?,?)''', *t) - # shouldn't emit changedInboxUnread and displayNewInboxMessage - # at the same time - # queues.UISignalQueue.put(('changedInboxUnread', None)) - - -def trash(msgid): - """Mark a message in the `inbox` as `trash`""" - sqlExecute('''UPDATE inbox SET folder='trash' WHERE msgid=?''', msgid) - queues.UISignalQueue.put(('removeInboxRowByMsgid', msgid)) - - -def undeleteMessage(msgid): - """Undelte the message""" - sqlExecute('''UPDATE inbox SET folder='inbox' WHERE msgid=?''', msgid) - - -def isMessageAlreadyInInbox(sigHash): - """Check for previous instances of this message""" - queryReturn = sqlQuery( - '''SELECT COUNT(*) FROM inbox WHERE sighash=?''', sigHash) - return queryReturn[0][0] != 0 diff --git a/src/tests/mock/pybitmessage/helper_msgcoding.py b/src/tests/mock/pybitmessage/helper_msgcoding.py deleted file mode 100644 index 28f92288..00000000 --- a/src/tests/mock/pybitmessage/helper_msgcoding.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Message encoding end decoding functions -""" - -import string -import zlib - -import messagetypes -from bmconfigparser import BMConfigParser -from debug import logger -from tr import _translate - -try: - import msgpack 
-except ImportError: - try: - import umsgpack as msgpack - except ImportError: - import fallback.umsgpack.umsgpack as msgpack - -BITMESSAGE_ENCODING_IGNORE = 0 -BITMESSAGE_ENCODING_TRIVIAL = 1 -BITMESSAGE_ENCODING_SIMPLE = 2 -BITMESSAGE_ENCODING_EXTENDED = 3 - - -class MsgEncodeException(Exception): - """Exception during message encoding""" - pass - - -class MsgDecodeException(Exception): - """Exception during message decoding""" - pass - - -class DecompressionSizeException(MsgDecodeException): - # pylint: disable=super-init-not-called - """Decompression resulted in too much data (attack protection)""" - def __init__(self, size): - self.size = size - - -class MsgEncode(object): - """Message encoder class""" - def __init__(self, message, encoding=BITMESSAGE_ENCODING_SIMPLE): - self.data = None - self.encoding = encoding - self.length = 0 - if self.encoding == BITMESSAGE_ENCODING_EXTENDED: - self.encodeExtended(message) - elif self.encoding == BITMESSAGE_ENCODING_SIMPLE: - self.encodeSimple(message) - elif self.encoding == BITMESSAGE_ENCODING_TRIVIAL: - self.encodeTrivial(message) - else: - raise MsgEncodeException("Unknown encoding %i" % (encoding)) - - def encodeExtended(self, message): - """Handle extended encoding""" - try: - msgObj = messagetypes.message.Message() - self.data = zlib.compress(msgpack.dumps(msgObj.encode(message)), 9) - except zlib.error: - logger.error("Error compressing message") - raise MsgEncodeException("Error compressing message") - except msgpack.exceptions.PackException: - logger.error("Error msgpacking message") - raise MsgEncodeException("Error msgpacking message") - self.length = len(self.data) - - def encodeSimple(self, message): - """Handle simple encoding""" - self.data = 'Subject:%(subject)s\nBody:%(body)s' % message - self.length = len(self.data) - - def encodeTrivial(self, message): - """Handle trivial encoding""" - self.data = message['body'] - self.length = len(self.data) - - -class MsgDecode(object): - """Message decoder 
class""" - def __init__(self, encoding, data): - self.encoding = encoding - if self.encoding == BITMESSAGE_ENCODING_EXTENDED: - self.decodeExtended(data) - elif self.encoding in ( - BITMESSAGE_ENCODING_SIMPLE, BITMESSAGE_ENCODING_TRIVIAL): - self.decodeSimple(data) - else: - self.body = _translate( - "MsgDecode", - "The message has an unknown encoding.\n" - "Perhaps you should upgrade Bitmessage.") - self.subject = _translate("MsgDecode", "Unknown encoding") - - def decodeExtended(self, data): - """Handle extended encoding""" - dc = zlib.decompressobj() - tmp = "" - while len(tmp) <= BMConfigParser().safeGetInt("zlib", "maxsize"): - try: - got = dc.decompress( - data, BMConfigParser().safeGetInt("zlib", "maxsize") - + 1 - len(tmp)) - # EOF - if got == "": - break - tmp += got - data = dc.unconsumed_tail - except zlib.error: - logger.error("Error decompressing message") - raise MsgDecodeException("Error decompressing message") - else: - raise DecompressionSizeException(len(tmp)) - - try: - tmp = msgpack.loads(tmp) - except (msgpack.exceptions.UnpackException, - msgpack.exceptions.ExtraData): - logger.error("Error msgunpacking message") - raise MsgDecodeException("Error msgunpacking message") - - try: - msgType = tmp[""] - except KeyError: - logger.error("Message type missing") - raise MsgDecodeException("Message type missing") - - msgObj = messagetypes.constructObject(tmp) - if msgObj is None: - raise MsgDecodeException("Malformed message") - try: - msgObj.process() - except: # noqa:E722 - raise MsgDecodeException("Malformed message") - if msgType == "message": - self.subject = msgObj.subject - self.body = msgObj.body - - def decodeSimple(self, data): - """Handle simple encoding""" - bodyPositionIndex = string.find(data, '\nBody:') - if bodyPositionIndex > 1: - subject = data[8:bodyPositionIndex] - # Only save and show the first 500 characters of the subject. - # Any more is probably an attack. 
- subject = subject[:500] - body = data[bodyPositionIndex + 6:] - else: - subject = '' - body = data - # Throw away any extra lines (headers) after the subject. - if subject: - subject = subject.splitlines()[0] - self.subject = subject - self.body = body diff --git a/src/tests/mock/pybitmessage/helper_random.py b/src/tests/mock/pybitmessage/helper_random.py deleted file mode 100644 index 43194d4e..00000000 --- a/src/tests/mock/pybitmessage/helper_random.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Convenience functions for random operations. Not suitable for security / cryptography operations.""" - -import os -import random - -try: - from pyelliptic.openssl import OpenSSL -except ImportError: - from .openssl import OpenSSL - -NoneType = type(None) - - -def seed(): - """Initialize random number generator""" - random.seed() - - -def randomBytes(n): - """Method randomBytes.""" - try: - return os.urandom(n) - except NotImplementedError: - return OpenSSL.rand(n) - - -def randomshuffle(population): - """Method randomShuffle. - - shuffle the sequence x in place. - shuffles the elements in list in place, - so they are in a random order. - As Shuffle will alter data in-place, - so its input must be a mutable sequence. - In contrast, sample produces a new list - and its input can be much more varied - (tuple, string, xrange, bytearray, set, etc) - """ - random.shuffle(population) - - -def randomsample(population, k): - """Method randomSample. - - return a k length list of unique elements - chosen from the population sequence. - Used for random sampling - without replacement, its called - partial shuffle. - """ - return random.sample(population, k) - - -def randomrandrange(x, y=None): - """Method randomRandrange. - - return a randomly selected element from - range(start, stop). This is equivalent to - choice(range(start, stop)), - but doesnt actually build a range object. 
- """ - if isinstance(y, NoneType): - return random.randrange(x) # nosec - return random.randrange(x, y) # nosec - - -def randomchoice(population): - """Method randomchoice. - - Return a random element from the non-empty - sequence seq. If seq is empty, raises - IndexError. - """ - return random.choice(population) # nosec diff --git a/src/tests/mock/pybitmessage/helper_search.py b/src/tests/mock/pybitmessage/helper_search.py deleted file mode 100644 index 9fcb88b5..00000000 --- a/src/tests/mock/pybitmessage/helper_search.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Additional SQL helper for searching messages. -Used by :mod:`.bitmessageqt`. -""" - -from helper_sql import sqlQuery -from tr import _translate - - -def search_sql( - xAddress='toaddress', account=None, folder='inbox', where=None, - what=None, unreadOnly=False -): - """ - Search for messages from given account and folder having search term - in one of it's fields. - - :param str xAddress: address field checked - ('fromaddress', 'toaddress' or 'both') - :param account: the account which is checked - :type account: :class:`.bitmessageqt.account.BMAccount` - instance - :param str folder: the folder which is checked - :param str where: message field which is checked ('toaddress', - 'fromaddress', 'subject' or 'message'), by default check any field - :param str what: the search term - :param bool unreadOnly: if True, search only for unread messages - :return: all messages where field contains - :rtype: list[list] - """ - # pylint: disable=too-many-arguments, too-many-branches - if what: - what = '%' + what + '%' - if where == _translate("MainWindow", "To"): - where = 'toaddress' - elif where == _translate("MainWindow", "From"): - where = 'fromaddress' - elif where == _translate("MainWindow", "Subject"): - where = 'subject' - elif where == _translate("MainWindow", "Message"): - where = 'message' - else: - where = 'toaddress || fromaddress || subject || message' - - sqlStatementBase = 'SELECT toaddress, 
fromaddress, subject, ' + ( - 'status, ackdata, lastactiontime FROM sent ' if folder == 'sent' - else 'folder, msgid, received, read FROM inbox ' - ) - - sqlStatementParts = [] - sqlArguments = [] - if account is not None: - if xAddress == 'both': - sqlStatementParts.append('(fromaddress = ? OR toaddress = ?)') - sqlArguments.append(account) - sqlArguments.append(account) - else: - sqlStatementParts.append(xAddress + ' = ? ') - sqlArguments.append(account) - if folder is not None: - if folder == 'new': - folder = 'inbox' - unreadOnly = True - sqlStatementParts.append('folder = ? ') - sqlArguments.append(folder) - else: - sqlStatementParts.append('folder != ?') - sqlArguments.append('trash') - if what: - sqlStatementParts.append('%s LIKE ?' % (where)) - sqlArguments.append(what) - if unreadOnly: - sqlStatementParts.append('read = 0') - if sqlStatementParts: - sqlStatementBase += 'WHERE ' + ' AND '.join(sqlStatementParts) - if folder == 'sent': - sqlStatementBase += ' ORDER BY lastactiontime' - return sqlQuery(sqlStatementBase, sqlArguments) - - -def check_match( - toAddress, fromAddress, subject, message, where=None, what=None): - """ - Check if a single message matches a filter (used when new messages - are added to messagelists) - """ - # pylint: disable=too-many-arguments - if not what: - return True - - if where in ( - _translate("MainWindow", "To"), _translate("MainWindow", "All") - ): - if what.lower() not in toAddress.lower(): - return False - elif where in ( - _translate("MainWindow", "From"), _translate("MainWindow", "All") - ): - if what.lower() not in fromAddress.lower(): - return False - elif where in ( - _translate("MainWindow", "Subject"), - _translate("MainWindow", "All") - ): - if what.lower() not in subject.lower(): - return False - elif where in ( - _translate("MainWindow", "Message"), - _translate("MainWindow", "All") - ): - if what.lower() not in message.lower(): - return False - return True diff --git a/src/tests/mock/pybitmessage/helper_sent.py 
b/src/tests/mock/pybitmessage/helper_sent.py deleted file mode 100644 index d83afce6..00000000 --- a/src/tests/mock/pybitmessage/helper_sent.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Insert values into sent table -""" - -import time -import uuid -from addresses import decodeAddress -from bmconfigparser import BMConfigParser -from helper_ackPayload import genAckPayload -from helper_sql import sqlExecute - - -# pylint: disable=too-many-arguments -def insert(msgid=None, toAddress='[Broadcast subscribers]', fromAddress=None, subject=None, - message=None, status='msgqueued', ripe=None, ackdata=None, sentTime=None, - lastActionTime=None, sleeptill=0, retryNumber=0, encoding=2, ttl=None, folder='sent'): - """Perform an insert into the `sent` table""" - # pylint: disable=unused-variable - # pylint: disable-msg=too-many-locals - - valid_addr = True - if not ripe or not ackdata: - addr = fromAddress if toAddress == '[Broadcast subscribers]' else toAddress - new_status, addressVersionNumber, streamNumber, new_ripe = decodeAddress(addr) - valid_addr = True if new_status == 'success' else False - if not ripe: - ripe = new_ripe - - if not ackdata: - stealthLevel = BMConfigParser().safeGetInt( - 'bitmessagesettings', 'ackstealthlevel') - new_ackdata = genAckPayload(streamNumber, stealthLevel) - ackdata = new_ackdata - if valid_addr: - msgid = msgid if msgid else uuid.uuid4().bytes - sentTime = sentTime if sentTime else int(time.time()) # sentTime (this doesn't change) - lastActionTime = lastActionTime if lastActionTime else int(time.time()) - - ttl = ttl if ttl else BMConfigParser().getint('bitmessagesettings', 'ttl') - - t = (msgid, toAddress, ripe, fromAddress, subject, message, ackdata, - sentTime, lastActionTime, sleeptill, status, retryNumber, folder, - encoding, ttl) - - sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t) - return ackdata - else: - return None diff --git a/src/tests/mock/pybitmessage/helper_sql.py 
b/src/tests/mock/pybitmessage/helper_sql.py deleted file mode 100644 index cba98884..00000000 --- a/src/tests/mock/pybitmessage/helper_sql.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -SQL-related functions defined here are really pass the queries (or other SQL -commands) to :class:`.threads.sqlThread` through `sqlSubmitQueue` queue and check -or return the result got from `sqlReturnQueue`. - -This is done that way because :mod:`sqlite3` is so thread-unsafe that they -won't even let you call it from different threads using your own locks. -SQLite objects can only be used from one thread. - -.. note:: This actually only applies for certain deployments, and/or - really old version of sqlite. I haven't actually seen it anywhere. - Current versions do have support for threading and multiprocessing. - I don't see an urgent reason to refactor this, but it should be noted - in the comment that the problem is mostly not valid. Sadly, last time - I checked, there is no reliable way to check whether the library is - or isn't thread-safe. 
-""" - -import threading - -from six.moves import queue - - -sqlSubmitQueue = queue.Queue() -"""the queue for SQL""" -sqlReturnQueue = queue.Queue() -"""the queue for results""" -sql_lock = threading.Lock() -""" lock to prevent queueing a new request until the previous response - is available """ -sql_available = False -"""set to True by `.threads.sqlThread` immediately upon start""" -sql_ready = threading.Event() -"""set by `.threads.sqlThread` when ready for processing (after - initialization is done)""" - - -def sqlQuery(sql_statement, *args): - """ - Query sqlite and return results - - :param str sql_statement: SQL statement string - :param list args: SQL query parameters - :rtype: list - """ - assert sql_available - sql_lock.acquire() - sqlSubmitQueue.put(sql_statement) - - if args == (): - sqlSubmitQueue.put('') - elif isinstance(args[0], (list, tuple)): - sqlSubmitQueue.put(args[0]) - else: - sqlSubmitQueue.put(args) - queryreturn, _ = sqlReturnQueue.get() - sql_lock.release() - - return queryreturn - - -def sqlExecuteChunked(sql_statement, idCount, *args): - """Execute chunked SQL statement to avoid argument limit""" - # SQLITE_MAX_VARIABLE_NUMBER, - # unfortunately getting/setting isn't exposed to python - assert sql_available - sqlExecuteChunked.chunkSize = 999 - - if idCount == 0 or idCount > len(args): - return 0 - - total_row_count = 0 - with sql_lock: - for i in range( - len(args) - idCount, len(args), - sqlExecuteChunked.chunkSize - (len(args) - idCount) - ): - chunk_slice = args[ - i:i + sqlExecuteChunked.chunkSize - (len(args) - idCount) - ] - sqlSubmitQueue.put( - sql_statement.format(','.join('?' 
* len(chunk_slice))) - ) - # first static args, and then iterative chunk - sqlSubmitQueue.put( - args[0:len(args) - idCount] + chunk_slice - ) - ret_val = sqlReturnQueue.get() - total_row_count += ret_val[1] - sqlSubmitQueue.put('commit') - return total_row_count - - -def sqlExecute(sql_statement, *args): - """Execute SQL statement (optionally with arguments)""" - assert sql_available - sql_lock.acquire() - sqlSubmitQueue.put(sql_statement) - - if args == (): - sqlSubmitQueue.put('') - else: - sqlSubmitQueue.put(args) - _, rowcount = sqlReturnQueue.get() - sqlSubmitQueue.put('commit') - sql_lock.release() - return rowcount - - -def sqlExecuteScript(sql_statement): - """Execute SQL script statement""" - - statements = sql_statement.split(";") - with SqlBulkExecute() as sql: - for q in statements: - sql.execute("{}".format(q)) - - -def sqlStoredProcedure(procName): - """Schedule procName to be run""" - assert sql_available - sql_lock.acquire() - sqlSubmitQueue.put(procName) - if procName == "exit": - sqlSubmitQueue.task_done() - sqlSubmitQueue.put("terminate") - sql_lock.release() - - -class SqlBulkExecute(object): - """This is used when you have to execute the same statement in a cycle.""" - - def __enter__(self): - sql_lock.acquire() - return self - - def __exit__(self, exc_type, value, traceback): - sqlSubmitQueue.put('commit') - sql_lock.release() - - @staticmethod - def execute(sql_statement, *args): - """Used for statements that do not return results.""" - assert sql_available - sqlSubmitQueue.put(sql_statement) - - if args == (): - sqlSubmitQueue.put('') - else: - sqlSubmitQueue.put(args) - sqlReturnQueue.get() diff --git a/src/tests/mock/pybitmessage/helper_startup.py b/src/tests/mock/pybitmessage/helper_startup.py deleted file mode 100644 index b4951668..00000000 --- a/src/tests/mock/pybitmessage/helper_startup.py +++ /dev/null @@ -1,392 +0,0 @@ -""" -Startup operations. 
-""" -# pylint: disable=too-many-branches,too-many-statements - -import ctypes -import logging -import os -import platform -import socket -import sys -import time -from distutils.version import StrictVersion -from struct import pack - -try: - import defaults - import helper_random - import paths - import state - from bmconfigparser import BMConfigParser -except ImportError: - from . import defaults, helper_random, paths, state - from .bmconfigparser import BMConfigParser - -try: - from plugins.plugin import get_plugin -except ImportError: - get_plugin = None - - -logger = logging.getLogger('default') - -# The user may de-select Portable Mode in the settings if they want -# the config files to stay in the application data folder. -StoreConfigFilesInSameDirectoryAsProgramByDefault = False - - -def loadConfig(): - """Load the config""" - config = BMConfigParser() - if state.appdata: - config.read(state.appdata + 'keys.dat') - # state.appdata must have been specified as a startup option. - needToCreateKeysFile = config.safeGet( - 'bitmessagesettings', 'settingsversion') is None - if not needToCreateKeysFile: - logger.info( - 'Loading config files from directory specified' - ' on startup: %s', state.appdata) - else: - config.read(paths.lookupExeFolder() + 'keys.dat') - - if config.safeGet('bitmessagesettings', 'settingsversion'): - logger.info('Loading config files from same directory as program.') - needToCreateKeysFile = False - state.appdata = paths.lookupExeFolder() - else: - # Could not load the keys.dat file in the program directory. - # Perhaps it is in the appdata directory. 
- state.appdata = paths.lookupAppdataFolder() - config.read(state.appdata + 'keys.dat') - needToCreateKeysFile = config.safeGet( - 'bitmessagesettings', 'settingsversion') is None - if not needToCreateKeysFile: - logger.info( - 'Loading existing config files from %s', state.appdata) - - if needToCreateKeysFile: - - # This appears to be the first time running the program; there is - # no config file (or it cannot be accessed). Create config file. - config.add_section('bitmessagesettings') - config.set('bitmessagesettings', 'settingsversion', '10') - config.set('bitmessagesettings', 'port', '8444') - config.set('bitmessagesettings', 'timeformat', '%%c') - config.set('bitmessagesettings', 'blackwhitelist', 'black') - config.set('bitmessagesettings', 'startonlogon', 'false') - if 'linux' in sys.platform: - config.set('bitmessagesettings', 'minimizetotray', 'false') - # This isn't implimented yet and when True on - # Ubuntu causes Bitmessage to disappear while - # running when minimized. - else: - config.set('bitmessagesettings', 'minimizetotray', 'true') - config.set('bitmessagesettings', 'showtraynotifications', 'true') - config.set('bitmessagesettings', 'startintray', 'false') - config.set('bitmessagesettings', 'socksproxytype', 'none') - config.set('bitmessagesettings', 'sockshostname', 'localhost') - config.set('bitmessagesettings', 'socksport', '9050') - config.set('bitmessagesettings', 'socksauthentication', 'false') - config.set('bitmessagesettings', 'socksusername', '') - config.set('bitmessagesettings', 'sockspassword', '') - config.set('bitmessagesettings', 'keysencrypted', 'false') - config.set('bitmessagesettings', 'messagesencrypted', 'false') - config.set( - 'bitmessagesettings', 'defaultnoncetrialsperbyte', - str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte)) - config.set( - 'bitmessagesettings', 'defaultpayloadlengthextrabytes', - str(defaults.networkDefaultPayloadLengthExtraBytes)) - config.set('bitmessagesettings', 'minimizeonclose', 'false') 
- config.set('bitmessagesettings', 'dontconnect', 'true') - config.set('bitmessagesettings', 'replybelow', 'False') - config.set('bitmessagesettings', 'maxdownloadrate', '0') - config.set('bitmessagesettings', 'maxuploadrate', '0') - - # UI setting to stop trying to send messages after X days/months - config.set('bitmessagesettings', 'stopresendingafterxdays', '') - config.set('bitmessagesettings', 'stopresendingafterxmonths', '') - - # Are you hoping to add a new option to the keys.dat file? You're in - # the right place for adding it to users who install the software for - # the first time. But you must also add it to the keys.dat file of - # existing users. To do that, search the class_sqlThread.py file - # for the text: "right above this line!" - - if StoreConfigFilesInSameDirectoryAsProgramByDefault: - # Just use the same directory as the program and forget about - # the appdata folder - state.appdata = '' - logger.info( - 'Creating new config files in same directory as program.') - else: - logger.info('Creating new config files in %s', state.appdata) - if not os.path.exists(state.appdata): - os.makedirs(state.appdata) - if not sys.platform.startswith('win'): - os.umask(0o077) - config.save() - else: - updateConfig() - - -def updateConfig(): - """Save the config""" - config = BMConfigParser() - settingsversion = config.getint('bitmessagesettings', 'settingsversion') - if settingsversion == 1: - config.set('bitmessagesettings', 'socksproxytype', 'none') - config.set('bitmessagesettings', 'sockshostname', 'localhost') - config.set('bitmessagesettings', 'socksport', '9050') - config.set('bitmessagesettings', 'socksauthentication', 'false') - config.set('bitmessagesettings', 'socksusername', '') - config.set('bitmessagesettings', 'sockspassword', '') - config.set('bitmessagesettings', 'sockslisten', 'false') - config.set('bitmessagesettings', 'keysencrypted', 'false') - config.set('bitmessagesettings', 'messagesencrypted', 'false') - settingsversion = 2 - # let 
class_sqlThread update SQL and continue - elif settingsversion == 4: - config.set( - 'bitmessagesettings', 'defaultnoncetrialsperbyte', - str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte)) - config.set( - 'bitmessagesettings', 'defaultpayloadlengthextrabytes', - str(defaults.networkDefaultPayloadLengthExtraBytes)) - settingsversion = 5 - - if settingsversion == 5: - config.set( - 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0') - config.set( - 'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', '0') - settingsversion = 7 - - if not config.has_option('bitmessagesettings', 'sockslisten'): - config.set('bitmessagesettings', 'sockslisten', 'false') - - if not config.has_option('bitmessagesettings', 'userlocale'): - config.set('bitmessagesettings', 'userlocale', 'system') - - if not config.has_option('bitmessagesettings', 'sendoutgoingconnections'): - config.set('bitmessagesettings', 'sendoutgoingconnections', 'True') - - if not config.has_option('bitmessagesettings', 'useidenticons'): - config.set('bitmessagesettings', 'useidenticons', 'True') - if not config.has_option('bitmessagesettings', 'identiconsuffix'): - # acts as a salt - config.set( - 'bitmessagesettings', 'identiconsuffix', ''.join( - helper_random.randomchoice( - "123456789ABCDEFGHJKLMNPQRSTUVWXYZ" - "abcdefghijkmnopqrstuvwxyz") for x in range(12)) - ) # a twelve character pseudo-password to salt the identicons - - # Add settings to support no longer resending messages after - # a certain period of time even if we never get an ack - if settingsversion == 7: - config.set('bitmessagesettings', 'stopresendingafterxdays', '') - config.set('bitmessagesettings', 'stopresendingafterxmonths', '') - settingsversion = 8 - - # With the change to protocol version 3, reset the user-settable - # difficulties to 1 - if settingsversion == 8: - config.set( - 'bitmessagesettings', 'defaultnoncetrialsperbyte', - str(defaults.networkDefaultProofOfWorkNonceTrialsPerByte)) - config.set( - 
'bitmessagesettings', 'defaultpayloadlengthextrabytes', - str(defaults.networkDefaultPayloadLengthExtraBytes)) - previousTotalDifficulty = int( - config.getint( - 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte') - ) / 320 - previousSmallMessageDifficulty = int( - config.getint( - 'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') - ) / 14000 - config.set( - 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', - str(previousTotalDifficulty * 1000)) - config.set( - 'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', - str(previousSmallMessageDifficulty * 1000)) - settingsversion = 9 - - # Adjust the required POW values for each of this user's addresses - # to conform to protocol v3 norms. - if settingsversion == 9: - for addressInKeysFile in config.addresses(): - try: - previousTotalDifficulty = float( - config.getint( - addressInKeysFile, 'noncetrialsperbyte')) / 320 - previousSmallMessageDifficulty = float( - config.getint( - addressInKeysFile, 'payloadlengthextrabytes')) / 14000 - if previousTotalDifficulty <= 2: - previousTotalDifficulty = 1 - if previousSmallMessageDifficulty < 1: - previousSmallMessageDifficulty = 1 - config.set( - addressInKeysFile, 'noncetrialsperbyte', - str(int(previousTotalDifficulty * 1000))) - config.set( - addressInKeysFile, 'payloadlengthextrabytes', - str(int(previousSmallMessageDifficulty * 1000))) - except Exception: - continue - config.set('bitmessagesettings', 'maxdownloadrate', '0') - config.set('bitmessagesettings', 'maxuploadrate', '0') - settingsversion = 10 - - # sanity check - if config.safeGetInt( - 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte') == 0: - config.set( - 'bitmessagesettings', 'maxacceptablenoncetrialsperbyte', - str(defaults.ridiculousDifficulty - * defaults.networkDefaultProofOfWorkNonceTrialsPerByte) - ) - if config.safeGetInt( - 'bitmessagesettings', 'maxacceptablepayloadlengthextrabytes') == 0: - config.set( - 'bitmessagesettings', 
'maxacceptablepayloadlengthextrabytes', - str(defaults.ridiculousDifficulty - * defaults.networkDefaultPayloadLengthExtraBytes) - ) - - if not config.has_option('bitmessagesettings', 'onionhostname'): - config.set('bitmessagesettings', 'onionhostname', '') - if not config.has_option('bitmessagesettings', 'onionport'): - config.set('bitmessagesettings', 'onionport', '8444') - if not config.has_option('bitmessagesettings', 'onionbindip'): - config.set('bitmessagesettings', 'onionbindip', '127.0.0.1') - if not config.has_option('bitmessagesettings', 'smtpdeliver'): - config.set('bitmessagesettings', 'smtpdeliver', '') - if not config.has_option( - 'bitmessagesettings', 'hidetrayconnectionnotifications'): - config.set( - 'bitmessagesettings', 'hidetrayconnectionnotifications', 'false') - if config.safeGetInt('bitmessagesettings', 'maxoutboundconnections') < 1: - config.set('bitmessagesettings', 'maxoutboundconnections', '8') - logger.warning('Your maximum outbound connections must be a number.') - - # TTL is now user-specifiable. Let's add an option to save - # whatever the user selects. - if not config.has_option('bitmessagesettings', 'ttl'): - config.set('bitmessagesettings', 'ttl', '367200') - - config.set('bitmessagesettings', 'settingsversion', str(settingsversion)) - config.save() - - -def adjustHalfOpenConnectionsLimit(): - """Check and satisfy half-open connections limit (mainly XP and Vista)""" - if BMConfigParser().safeGet( - 'bitmessagesettings', 'socksproxytype', 'none') != 'none': - state.maximumNumberOfHalfOpenConnections = 4 - return - - is_limited = False - try: - if sys.platform[0:3] == "win": - # Some XP and Vista systems can only have 10 outgoing - # connections at a time. 
- VER_THIS = StrictVersion(platform.version()) - is_limited = ( - StrictVersion("5.1.2600") <= VER_THIS - and StrictVersion("6.0.6000") >= VER_THIS - ) - except ValueError: - pass - - state.maximumNumberOfHalfOpenConnections = 9 if is_limited else 64 - - -def fixSocket(): - """Add missing socket options and methods mainly on Windows""" - if sys.platform.startswith('linux'): - socket.SO_BINDTODEVICE = 25 - - if not sys.platform.startswith('win'): - return - - # Python 2 on Windows doesn't define a wrapper for - # socket.inet_ntop but we can make one ourselves using ctypes - if not hasattr(socket, 'inet_ntop'): - addressToString = ctypes.windll.ws2_32.WSAAddressToStringA - - def inet_ntop(family, host): - """Converting an IP address in packed - binary format to string format""" - if family == socket.AF_INET: - if len(host) != 4: - raise ValueError("invalid IPv4 host") - host = pack("hH4s8s", socket.AF_INET, 0, host, "\0" * 8) - elif family == socket.AF_INET6: - if len(host) != 16: - raise ValueError("invalid IPv6 host") - host = pack("hHL16sL", socket.AF_INET6, 0, 0, host, 0) - else: - raise ValueError("invalid address family") - buf = "\0" * 64 - lengthBuf = pack("I", len(buf)) - addressToString(host, len(host), None, buf, lengthBuf) - return buf[0:buf.index("\0")] - socket.inet_ntop = inet_ntop - - # Same for inet_pton - if not hasattr(socket, 'inet_pton'): - stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA - - def inet_pton(family, host): - """Converting an IP address in string format - to a packed binary format""" - buf = "\0" * 28 - lengthBuf = pack("I", len(buf)) - if stringToAddress(str(host), - int(family), - None, - buf, - lengthBuf) != 0: - raise socket.error("illegal IP address passed to inet_pton") - if family == socket.AF_INET: - return buf[4:8] - elif family == socket.AF_INET6: - return buf[8:24] - else: - raise ValueError("invalid address family") - socket.inet_pton = inet_pton - - # These sockopts are needed on for IPv6 support - if not 
hasattr(socket, 'IPPROTO_IPV6'): - socket.IPPROTO_IPV6 = 41 - if not hasattr(socket, 'IPV6_V6ONLY'): - socket.IPV6_V6ONLY = 27 - - -def start_proxyconfig(): - """Check socksproxytype and start any proxy configuration plugin""" - if not get_plugin: - return - config = BMConfigParser() - proxy_type = config.safeGet('bitmessagesettings', 'socksproxytype') - if proxy_type and proxy_type not in ('none', 'SOCKS4a', 'SOCKS5'): - try: - proxyconfig_start = time.time() - if not get_plugin('proxyconfig', name=proxy_type)(config): - raise TypeError() - except TypeError: - # cannot import shutdown here ): - logger.error( - 'Failed to run proxy config plugin %s', - proxy_type, exc_info=True) - config.setTemp('bitmessagesettings', 'dontconnect', 'true') - else: - logger.info( - 'Started proxy config plugin %s in %s sec', - proxy_type, time.time() - proxyconfig_start) diff --git a/src/tests/mock/pybitmessage/highlevelcrypto.py b/src/tests/mock/pybitmessage/highlevelcrypto.py deleted file mode 100644 index 82743acf..00000000 --- a/src/tests/mock/pybitmessage/highlevelcrypto.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -High level cryptographic functions based on `.pyelliptic` OpenSSL bindings. - -.. note:: - Upstream pyelliptic was upgraded from SHA1 to SHA256 for signing. We must - `upgrade PyBitmessage gracefully. `_ - `More discussion. 
`_ -""" - -from binascii import hexlify - -import pyelliptic -from pyelliptic import OpenSSL -from pyelliptic import arithmetic as a - -from bmconfigparser import BMConfigParser - -__all__ = ['encrypt', 'makeCryptor', 'pointMult', 'privToPub', 'sign', 'verify'] - - -def makeCryptor(privkey): - """Return a private `.pyelliptic.ECC` instance""" - private_key = a.changebase(privkey, 16, 256, minlen=32) - public_key = pointMult(private_key) - privkey_bin = b'\x02\xca\x00\x20' + private_key - pubkey_bin = ( - b'\x02\xca\x00\x20' + public_key[1:-32] + b'\x00\x20' + public_key[-32:] - ) - cryptor = pyelliptic.ECC( - curve='secp256k1', privkey=privkey_bin, pubkey=pubkey_bin) - return cryptor - - -def hexToPubkey(pubkey): - """Convert a pubkey from hex to binary""" - pubkey_raw = a.changebase(pubkey[2:], 16, 256, minlen=64) - pubkey_bin = b'\x02\xca\x00 ' + pubkey_raw[:32] + b'\x00 ' + pubkey_raw[32:] - return pubkey_bin - - -def makePubCryptor(pubkey): - """Return a public `.pyelliptic.ECC` instance""" - pubkey_bin = hexToPubkey(pubkey) - return pyelliptic.ECC(curve='secp256k1', pubkey=pubkey_bin) - - -def privToPub(privkey): - """Converts hex private key into hex public key""" - private_key = a.changebase(privkey, 16, 256, minlen=32) - public_key = pointMult(private_key) - return hexlify(public_key) - - -def encrypt(msg, hexPubkey): - """Encrypts message with hex public key""" - return pyelliptic.ECC(curve='secp256k1').encrypt( - msg, hexToPubkey(hexPubkey)) - - -def decrypt(msg, hexPrivkey): - """Decrypts message with hex private key""" - return makeCryptor(hexPrivkey).decrypt(msg) - - -def decryptFast(msg, cryptor): - """Decrypts message with an existing `.pyelliptic.ECC` object""" - return cryptor.decrypt(msg) - - -def sign(msg, hexPrivkey): - """ - Signs with hex private key using SHA1 or SHA256 depending on - "digestalg" setting - """ - digestAlg = BMConfigParser().safeGet( - 'bitmessagesettings', 'digestalg', 'sha256') - if digestAlg == "sha1": - # SHA1, this will 
eventually be deprecated - return makeCryptor(hexPrivkey).sign( - msg, digest_alg=OpenSSL.digest_ecdsa_sha1) - elif digestAlg == "sha256": - # SHA256. Eventually this will become the default - return makeCryptor(hexPrivkey).sign(msg, digest_alg=OpenSSL.EVP_sha256) - else: - raise ValueError("Unknown digest algorithm %s" % digestAlg) - - -def verify(msg, sig, hexPubkey): - """Verifies with hex public key using SHA1 or SHA256""" - # As mentioned above, we must upgrade gracefully to use SHA256. So - # let us check the signature using both SHA1 and SHA256 and if one - # of them passes then we will be satisfied. Eventually this can - # be simplified and we'll only check with SHA256. - try: - # old SHA1 algorithm. - sigVerifyPassed = makePubCryptor(hexPubkey).verify( - sig, msg, digest_alg=OpenSSL.digest_ecdsa_sha1) - except: - sigVerifyPassed = False - if sigVerifyPassed: - # The signature check passed using SHA1 - return True - # The signature check using SHA1 failed. Let us try it with SHA256. - try: - return makePubCryptor(hexPubkey).verify( - sig, msg, digest_alg=OpenSSL.EVP_sha256) - except: - return False - - -def pointMult(secret): - """ - Does an EC point multiplication; turns a private key into a public key. 
- - Evidently, this type of error can occur very rarely: - - >>> File "highlevelcrypto.py", line 54, in pointMult - >>> group = OpenSSL.EC_KEY_get0_group(k) - >>> WindowsError: exception: access violation reading 0x0000000000000008 - """ - while True: - try: - k = OpenSSL.EC_KEY_new_by_curve_name( - OpenSSL.get_curve('secp256k1')) - priv_key = OpenSSL.BN_bin2bn(secret, 32, None) - group = OpenSSL.EC_KEY_get0_group(k) - pub_key = OpenSSL.EC_POINT_new(group) - - OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None) - OpenSSL.EC_KEY_set_private_key(k, priv_key) - OpenSSL.EC_KEY_set_public_key(k, pub_key) - - size = OpenSSL.i2o_ECPublicKey(k, None) - mb = OpenSSL.create_string_buffer(size) - OpenSSL.i2o_ECPublicKey(k, OpenSSL.byref(OpenSSL.pointer(mb))) - - OpenSSL.EC_POINT_free(pub_key) - OpenSSL.BN_free(priv_key) - OpenSSL.EC_KEY_free(k) - return mb.raw - - except Exception: - import traceback - import time - traceback.print_exc() - time.sleep(0.2) diff --git a/src/tests/mock/pybitmessage/l10n.py b/src/tests/mock/pybitmessage/l10n.py deleted file mode 100644 index 3b16f0b6..00000000 --- a/src/tests/mock/pybitmessage/l10n.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Localization helpers""" - -import logging -import os -import re -import sys -import time - -from six.moves import range - -from bmconfigparser import BMConfigParser - -logger = logging.getLogger('default') - -DEFAULT_ENCODING = 'ISO8859-1' -DEFAULT_LANGUAGE = 'en_US' -DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' - -try: - import locale - encoding = locale.getpreferredencoding(True) or DEFAULT_ENCODING - language = ( - locale.getlocale()[0] or locale.getdefaultlocale()[0] - or DEFAULT_LANGUAGE) -except (ImportError, AttributeError): # FIXME: it never happens - logger.exception('Could not determine language or encoding') - locale = None - encoding = DEFAULT_ENCODING - language = DEFAULT_LANGUAGE - - -windowsLanguageMap = { - "ar": "arabic", - "cs": "czech", - "da": "danish", - "de": "german", - "en": 
"english", - "eo": "esperanto", - "fr": "french", - "it": "italian", - "ja": "japanese", - "nl": "dutch", - "no": "norwegian", - "pl": "polish", - "pt": "portuguese", - "ru": "russian", - "sk": "slovak", - "zh": "chinese", - "zh_CN": "chinese-simplified", - "zh_HK": "chinese-traditional", - "zh_SG": "chinese-simplified", - "zh_TW": "chinese-traditional" -} - - -time_format = BMConfigParser().safeGet( - 'bitmessagesettings', 'timeformat', DEFAULT_TIME_FORMAT) - -if not re.search(r'\d', time.strftime(time_format)): - time_format = DEFAULT_TIME_FORMAT - -# It seems some systems lie about the encoding they use -# so we perform comprehensive decoding tests -elif sys.version_info[0] == 2: - try: - # Check day names - for i in range(7): - time.strftime( - time_format, (0, 0, 0, 0, 0, 0, i, 0, 0)).decode(encoding) - # Check month names - for i in range(1, 13): - time.strftime( - time_format, (0, i, 0, 0, 0, 0, 0, 0, 0)).decode(encoding) - # Check AM/PM - time.strftime( - time_format, (0, 0, 0, 11, 0, 0, 0, 0, 0)).decode(encoding) - time.strftime( - time_format, (0, 0, 0, 13, 0, 0, 0, 0, 0)).decode(encoding) - # Check DST - time.strftime( - time_format, (0, 0, 0, 0, 0, 0, 0, 0, 1)).decode(encoding) - except Exception: # TODO: write tests and determine exception types - logger.exception('Could not decode locale formatted timestamp') - # time_format = DEFAULT_TIME_FORMAT - encoding = DEFAULT_ENCODING - - -def setlocale(newlocale): - """Set the locale""" - try: - locale.setlocale(locale.LC_ALL, newlocale) - except AttributeError: # locale is None - pass - # it looks like some stuff isn't initialised yet when this is called the - # first time and its init gets the locale settings from the environment - os.environ["LC_ALL"] = newlocale - - -def formatTimestamp(timestamp=None): - """Return a formatted timestamp""" - # For some reason some timestamps are strings so we need to sanitize. 
- if timestamp is not None and not isinstance(timestamp, int): - try: - timestamp = int(timestamp) - except (ValueError, TypeError): - timestamp = None - - # timestamp can't be less than 0. - if timestamp is not None and timestamp < 0: - timestamp = None - - if timestamp is None: - timestring = time.strftime(time_format) - else: - # In case timestamp is too far in the future - try: - timestring = time.strftime(time_format, time.localtime(timestamp)) - except ValueError: - timestring = time.strftime(time_format) - - if sys.version_info[0] == 2: - return timestring.decode(encoding) - return timestring - - -def getTranslationLanguage(): - """Return the user's language choice""" - userlocale = BMConfigParser().safeGet( - 'bitmessagesettings', 'userlocale', 'system') - return userlocale if userlocale and userlocale != 'system' else language - - -def getWindowsLocale(posixLocale): - """ - Get the Windows locale - Technically this converts the locale string from UNIX to Windows format, - because they use different ones in their - libraries. E.g. "en_EN.UTF-8" to "english". - """ - if posixLocale in windowsLanguageMap: - return windowsLanguageMap[posixLocale] - if "." 
in posixLocale: - loc = posixLocale.split(".", 1) - if loc[0] in windowsLanguageMap: - return windowsLanguageMap[loc[0]] - if "_" in posixLocale: - loc = posixLocale.split("_", 1) - if loc[0] in windowsLanguageMap: - return windowsLanguageMap[loc[0]] - if posixLocale != DEFAULT_LANGUAGE: - return getWindowsLocale(DEFAULT_LANGUAGE) - return None diff --git a/src/tests/mock/pybitmessage/main.py b/src/tests/mock/pybitmessage/main.py deleted file mode 100644 index e1644436..00000000 --- a/src/tests/mock/pybitmessage/main.py +++ /dev/null @@ -1,13 +0,0 @@ -"""This module is for thread start.""" -import state -import sys -from bitmessagemain import main -from termcolor import colored -print(colored('kivy is not supported at the moment for this version..', 'red')) -sys.exit() - - -if __name__ == '__main__': - state.kivy = True - print("Kivy Loading......") - main() diff --git a/src/tests/mock/pybitmessage/multiqueue.py b/src/tests/mock/pybitmessage/multiqueue.py deleted file mode 100644 index 88b6a4dd..00000000 --- a/src/tests/mock/pybitmessage/multiqueue.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -A queue with multiple internal subqueues. -Elements are added into a random subqueue, and retrieval rotates -""" - -from collections import deque - -from six.moves import queue - -try: - import helper_random -except ImportError: - from . 
import helper_random - - -class MultiQueue(queue.Queue): - """A base queue class""" - # pylint: disable=redefined-builtin,attribute-defined-outside-init - defaultQueueCount = 10 - - def __init__(self, maxsize=0, count=0): - if not count: - self.queueCount = MultiQueue.defaultQueueCount - else: - self.queueCount = count - queue.Queue.__init__(self, maxsize) - - # Initialize the queue representation - def _init(self, maxsize): - self.iter = 0 - self.queues = [] - for _ in range(self.queueCount): - self.queues.append(deque()) - - def _qsize(self, len=len): - return len(self.queues[self.iter]) - - # Put a new item in the queue - def _put(self, item): - # self.queue.append(item) - self.queues[helper_random.randomrandrange(self.queueCount)].append( - (item)) - - # Get an item from the queue - def _get(self): - return self.queues[self.iter].popleft() - - def iterate(self): - """Increment the iteration counter""" - self.iter = (self.iter + 1) % self.queueCount - - def totalSize(self): - """Return the total number of items in all the queues""" - return sum(len(x) for x in self.queues) diff --git a/src/tests/mock/pybitmessage/namecoin.py b/src/tests/mock/pybitmessage/namecoin.py deleted file mode 100644 index 33d39070..00000000 --- a/src/tests/mock/pybitmessage/namecoin.py +++ /dev/null @@ -1,374 +0,0 @@ -""" -Namecoin queries -""" -# pylint: disable=too-many-branches,protected-access - -import base64 -import httplib -import json -import os -import socket -import sys - -import defaults -from addresses import decodeAddress -from bmconfigparser import BMConfigParser -from debug import logger -from tr import _translate # translate - -configSection = "bitmessagesettings" - - -class RPCError(Exception): - """Error thrown when the RPC call returns an error.""" - - error = None - - def __init__(self, data): - super(RPCError, self).__init__() - self.error = data - - def __str__(self): - return "{0}: {1}".format(type(self).__name__, self.error) - - -class namecoinConnection(object): 
- """This class handles the Namecoin identity integration.""" - - user = None - password = None - host = None - port = None - nmctype = None - bufsize = 4096 - queryid = 1 - con = None - - def __init__(self, options=None): - """ - Initialise. If options are given, take the connection settings from - them instead of loading from the configs. This can be used to test - currently entered connection settings in the config dialog without - actually changing the values (yet). - """ - if options is None: - self.nmctype = BMConfigParser().get( - configSection, "namecoinrpctype") - self.host = BMConfigParser().get( - configSection, "namecoinrpchost") - self.port = int(BMConfigParser().get( - configSection, "namecoinrpcport")) - self.user = BMConfigParser().get( - configSection, "namecoinrpcuser") - self.password = BMConfigParser().get( - configSection, "namecoinrpcpassword") - else: - self.nmctype = options["type"] - self.host = options["host"] - self.port = int(options["port"]) - self.user = options["user"] - self.password = options["password"] - - assert self.nmctype == "namecoind" or self.nmctype == "nmcontrol" - if self.nmctype == "namecoind": - self.con = httplib.HTTPConnection(self.host, self.port, timeout=3) - - def query(self, identity): - """ - Query for the bitmessage address corresponding to the given identity - string. If it doesn't contain a slash, id/ is prepended. We return - the result as (Error, Address) pair, where the Error is an error - message to display or None in case of success. - """ - slashPos = identity.find("/") - if slashPos < 0: - display_name = identity - identity = "id/" + identity - else: - display_name = identity.split("/")[1] - - try: - if self.nmctype == "namecoind": - res = self.callRPC("name_show", [identity]) - res = res["value"] - elif self.nmctype == "nmcontrol": - res = self.callRPC("data", ["getValue", identity]) - res = res["reply"] - if not res: - return (_translate( - "MainWindow", "The name %1 was not found." 
- ).arg(identity.decode("utf-8", "ignore")), None) - else: - assert False - except RPCError as exc: - logger.exception("Namecoin query RPC exception") - if isinstance(exc.error, dict): - errmsg = exc.error["message"] - else: - errmsg = exc.error - return (_translate( - "MainWindow", "The namecoin query failed (%1)" - ).arg(errmsg.decode("utf-8", "ignore")), None) - except AssertionError: - return (_translate( - "MainWindow", "Unknown namecoin interface type: %1" - ).arg(self.nmctype.decode("utf-8", "ignore")), None) - except Exception: - logger.exception("Namecoin query exception") - return (_translate( - "MainWindow", "The namecoin query failed."), None) - - try: - res = json.loads(res) - except ValueError: - pass - else: - try: - display_name = res["name"] - except KeyError: - pass - res = res.get("bitmessage") - - valid = decodeAddress(res)[0] == "success" - return ( - None, "%s <%s>" % (display_name, res) - ) if valid else ( - _translate( - "MainWindow", - "The name %1 has no associated Bitmessage address." - ).arg(identity.decode("utf-8", "ignore")), None) - - def test(self): - """ - Test the connection settings. This routine tries to query a "getinfo" - command, and builds either an error message or a success message with - some info from it. - """ - try: - if self.nmctype == "namecoind": - try: - vers = self.callRPC("getinfo", [])["version"] - except RPCError: - vers = self.callRPC("getnetworkinfo", [])["version"] - - v3 = vers % 100 - vers = vers / 100 - v2 = vers % 100 - vers = vers / 100 - v1 = vers - if v3 == 0: - versStr = "0.%d.%d" % (v1, v2) - else: - versStr = "0.%d.%d.%d" % (v1, v2, v3) - message = ( - "success", - _translate( - "MainWindow", - "Success! 
Namecoind version %1 running.").arg( - versStr.decode("utf-8", "ignore"))) - - elif self.nmctype == "nmcontrol": - res = self.callRPC("data", ["status"]) - prefix = "Plugin data running" - if ("reply" in res) and res["reply"][:len(prefix)] == prefix: - return ( - "success", - _translate( - "MainWindow", - "Success! NMControll is up and running." - ) - ) - - logger.error("Unexpected nmcontrol reply: %s", res) - message = ( - "failed", - _translate( - "MainWindow", - "Couldn\'t understand NMControl." - ) - ) - - else: - sys.exit("Unsupported Namecoin type") - - return message - - except Exception: - logger.info("Namecoin connection test failure") - return ( - "failed", - _translate( - "MainWindow", "The connection to namecoin failed.") - ) - - def callRPC(self, method, params): - """Helper routine that actually performs an JSON RPC call.""" - - data = {"method": method, "params": params, "id": self.queryid} - if self.nmctype == "namecoind": - resp = self.queryHTTP(json.dumps(data)) - elif self.nmctype == "nmcontrol": - resp = self.queryServer(json.dumps(data)) - else: - assert False - val = json.loads(resp) - - if val["id"] != self.queryid: - raise Exception("ID mismatch in JSON RPC answer.") - - if self.nmctype == "namecoind": - self.queryid = self.queryid + 1 - - error = val["error"] - if error is None: - return val["result"] - - if isinstance(error, bool): - raise RPCError(val["result"]) - raise RPCError(error) - - def queryHTTP(self, data): - """Query the server via HTTP.""" - - result = None - - try: - self.con.putrequest("POST", "/") - self.con.putheader("Connection", "Keep-Alive") - self.con.putheader("User-Agent", "bitmessage") - self.con.putheader("Host", self.host) - self.con.putheader("Content-Type", "application/json") - self.con.putheader("Content-Length", str(len(data))) - self.con.putheader("Accept", "application/json") - authstr = "%s:%s" % (self.user, self.password) - self.con.putheader( - "Authorization", "Basic %s" % base64.b64encode(authstr)) - 
self.con.endheaders() - self.con.send(data) - except: # noqa:E722 - logger.info("HTTP connection error") - return None - - try: - resp = self.con.getresponse() - result = resp.read() - if resp.status != 200: - raise Exception( - "Namecoin returned status" - " %i: %s" % (resp.status, resp.reason)) - except: # noqa:E722 - logger.info("HTTP receive error") - return None - - return result - - def queryServer(self, data): - """Helper routine sending data to the RPC " - "server and returning the result.""" - - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - s.settimeout(3) - s.connect((self.host, self.port)) - s.sendall(data) - result = "" - - while True: - tmp = s.recv(self.bufsize) - if not tmp: - break - result += tmp - - s.close() - - return result - - except socket.error as exc: - raise Exception("Socket error in RPC connection: %s" % exc) - - -def lookupNamecoinFolder(): - """ - Look up the namecoin data folder. - - .. todo:: Check whether this works on other platforms as well! - """ - - app = "namecoin" - from os import path, environ - if sys.platform == "darwin": - if "HOME" in environ: - dataFolder = path.join(os.environ["HOME"], - "Library/Application Support/", app) + "/" - else: - sys.exit( - "Could not find home folder, please report this message" - " and your OS X version to the BitMessage Github." - ) - - elif "win32" in sys.platform or "win64" in sys.platform: - dataFolder = path.join(environ["APPDATA"], app) + "\\" - else: - dataFolder = path.join(environ["HOME"], ".%s" % app) + "/" - - return dataFolder - - -def ensureNamecoinOptions(): - """ - Ensure all namecoin options are set, by setting those to default values - that aren't there. 
- """ - - if not BMConfigParser().has_option(configSection, "namecoinrpctype"): - BMConfigParser().set(configSection, "namecoinrpctype", "namecoind") - if not BMConfigParser().has_option(configSection, "namecoinrpchost"): - BMConfigParser().set(configSection, "namecoinrpchost", "localhost") - - hasUser = BMConfigParser().has_option(configSection, "namecoinrpcuser") - hasPass = BMConfigParser().has_option(configSection, "namecoinrpcpassword") - hasPort = BMConfigParser().has_option(configSection, "namecoinrpcport") - - # Try to read user/password from .namecoin configuration file. - defaultUser = "" - defaultPass = "" - nmcFolder = lookupNamecoinFolder() - nmcConfig = nmcFolder + "namecoin.conf" - try: - nmc = open(nmcConfig, "r") - - while True: - line = nmc.readline() - if line == "": - break - parts = line.split("=") - if len(parts) == 2: - key = parts[0] - val = parts[1].rstrip() - - if key == "rpcuser" and not hasUser: - defaultUser = val - if key == "rpcpassword" and not hasPass: - defaultPass = val - if key == "rpcport": - defaults.namecoinDefaultRpcPort = val - - nmc.close() - except IOError: - logger.warning( - "%s unreadable or missing, Namecoin support deactivated", - nmcConfig) - except Exception: - logger.warning("Error processing namecoin.conf", exc_info=True) - - # If still nothing found, set empty at least. - if not hasUser: - BMConfigParser().set(configSection, "namecoinrpcuser", defaultUser) - if not hasPass: - BMConfigParser().set(configSection, "namecoinrpcpassword", defaultPass) - - # Set default port now, possibly to found value. 
- if not hasPort: - BMConfigParser().set(configSection, "namecoinrpcport", - defaults.namecoinDefaultRpcPort) diff --git a/src/tests/mock/pybitmessage/network/__init__.py b/src/tests/mock/pybitmessage/network/__init__.py deleted file mode 100644 index 70613539..00000000 --- a/src/tests/mock/pybitmessage/network/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Network subsystem packages -""" -from addrthread import AddrThread -from announcethread import AnnounceThread -from connectionpool import BMConnectionPool -from dandelion import Dandelion -from downloadthread import DownloadThread -from invthread import InvThread -from networkthread import BMNetworkThread -from receivequeuethread import ReceiveQueueThread -from threads import StoppableThread -from uploadthread import UploadThread - - -__all__ = [ - "BMConnectionPool", "Dandelion", - "AddrThread", "AnnounceThread", "BMNetworkThread", "DownloadThread", - "InvThread", "ReceiveQueueThread", "UploadThread", "StoppableThread" -] diff --git a/src/tests/mock/pybitmessage/network/addrthread.py b/src/tests/mock/pybitmessage/network/addrthread.py deleted file mode 100644 index 79ed651b..00000000 --- a/src/tests/mock/pybitmessage/network/addrthread.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Announce addresses as they are received from other hosts -""" -from six.moves import queue - - -from pybitmessage import state -from pybitmessage.helper_random import randomshuffle -from pybitmessage.network.assemble import assemble_addr -from pybitmessage.network.connectionpool import BMConnectionPool -from pybitmessage.queues import addrQueue -from pybitmessage.threads import StoppableThread - - -class AddrThread(StoppableThread): - """(Node) address broadcasting thread""" - name = "AddrBroadcaster" - - def run(self): - while not state.shutdown: - chunk = [] - while True: - try: - data = addrQueue.get(False) - chunk.append(data) - except queue.Empty: - break - - if chunk: - # Choose peers randomly - connections = 
BMConnectionPool().establishedConnections() - randomshuffle(connections) - for i in connections: - randomshuffle(chunk) - filtered = [] - for stream, peer, seen, destination in chunk: - # peer's own address or address received from peer - if i.destination in (peer, destination): - continue - if stream not in i.streams: - continue - filtered.append((stream, peer, seen)) - if filtered: - i.append_write_buf(assemble_addr(filtered)) - - addrQueue.iterate() - for i in range(len(chunk)): - addrQueue.task_done() - self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/network/advanceddispatcher.py b/src/tests/mock/pybitmessage/network/advanceddispatcher.py deleted file mode 100644 index 645d7ee2..00000000 --- a/src/tests/mock/pybitmessage/network/advanceddispatcher.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -Improved version of asyncore dispatcher -""" -import socket -import threading -import time - -from pybitmessage.network import asyncore_pollchoose as asyncore -from pybitmessage import state -from pybitmessage.threads import BusyError, nonBlocking - - -class ProcessingError(Exception): - """General class for protocol parser exception, - use as a base for others.""" - pass - - -class UnknownStateError(ProcessingError): - """Parser points to an unknown (unimplemented) state.""" - pass - - -class AdvancedDispatcher(asyncore.dispatcher): - """Improved version of asyncore dispatcher, - with buffers and protocol state.""" - # pylint: disable=too-many-instance-attributes - _buf_len = 131072 # 128kB - - def __init__(self, sock=None): - if not hasattr(self, '_map'): - asyncore.dispatcher.__init__(self, sock) - self.connectedAt = 0 - self.close_reason = None - self.read_buf = bytearray() - self.write_buf = bytearray() - self.state = "init" - self.lastTx = time.time() - self.sentBytes = 0 - self.receivedBytes = 0 - self.expectBytes = 0 - self.readLock = threading.RLock() - self.writeLock = threading.RLock() - self.processingLock = threading.RLock() - self.uploadChunk = 
self.downloadChunk = 0 - - def append_write_buf(self, data): - """Append binary data to the end of stream write buffer.""" - if data: - if isinstance(data, list): - with self.writeLock: - for chunk in data: - self.write_buf.extend(chunk) - else: - with self.writeLock: - self.write_buf.extend(data) - - def slice_write_buf(self, length=0): - """Cut the beginning of the stream write buffer.""" - if length > 0: - with self.writeLock: - if length >= len(self.write_buf): - del self.write_buf[:] - else: - del self.write_buf[0:length] - - def slice_read_buf(self, length=0): - """Cut the beginning of the stream read buffer.""" - if length > 0: - with self.readLock: - if length >= len(self.read_buf): - del self.read_buf[:] - else: - del self.read_buf[0:length] - - def process(self): - """Process (parse) data that's in the buffer, - as long as there is enough data and the connection is open.""" - while self.connected and not state.shutdown: - try: - with nonBlocking(self.processingLock): - if not self.connected or state.shutdown: - break - if len(self.read_buf) < self.expectBytes: - return False - try: - cmd = getattr(self, "state_" + str(self.state)) - except AttributeError: - self.logger.error( - 'Unknown state %s', self.state, exc_info=True) - raise UnknownStateError(self.state) - if not cmd(): - break - except BusyError: - return False - return False - - def set_state(self, state_str, length=0, expectBytes=0): - """Set the next processing state.""" - self.expectBytes = expectBytes - self.slice_read_buf(length) - self.state = state_str - - def writable(self): - """Is data from the write buffer ready to be sent to the network?""" - self.uploadChunk = AdvancedDispatcher._buf_len - if asyncore.maxUploadRate > 0: - self.uploadChunk = int(asyncore.uploadBucket) - self.uploadChunk = min(self.uploadChunk, len(self.write_buf)) - return asyncore.dispatcher.writable(self) and ( - self.connecting or ( - self.connected and self.uploadChunk > 0)) - - def readable(self): - """Is the 
read buffer ready to accept data from the network?""" - self.downloadChunk = AdvancedDispatcher._buf_len - if asyncore.maxDownloadRate > 0: - self.downloadChunk = int(asyncore.downloadBucket) - try: - if self.expectBytes > 0 and not self.fullyEstablished: - self.downloadChunk = min( - self.downloadChunk, self.expectBytes - len(self.read_buf)) - if self.downloadChunk < 0: - self.downloadChunk = 0 - except AttributeError: - pass - return asyncore.dispatcher.readable(self) and ( - self.connecting or self.accepting or ( - self.connected and self.downloadChunk > 0)) - - def handle_read(self): - """Append incoming data to the read buffer.""" - self.lastTx = time.time() - newData = self.recv(self.downloadChunk) - self.receivedBytes += len(newData) - asyncore.update_received(len(newData)) - with self.readLock: - self.read_buf.extend(newData) - - def handle_write(self): - """Send outgoing data from write buffer.""" - self.lastTx = time.time() - written = self.send(self.write_buf[0:self.uploadChunk]) - asyncore.update_sent(written) - self.sentBytes += written - self.slice_write_buf(written) - - def handle_connect_event(self): - """Callback for connection established event.""" - try: - asyncore.dispatcher.handle_connect_event(self) - except socket.error as e: - # pylint: disable=protected-access - if e.args[0] not in asyncore._DISCONNECTED: - raise - - def handle_connect(self): - """Method for handling connection established implementations.""" - self.lastTx = time.time() - - def state_close(self): # pylint: disable=no-self-use - """Signal to the processing loop to end.""" - return False - - def handle_close(self): - """Callback for connection being closed, - but can also be called directly when you want connection to close.""" - with self.readLock: - self.read_buf = bytearray() - with self.writeLock: - self.write_buf = bytearray() - self.set_state("close") - self.close() diff --git a/src/tests/mock/pybitmessage/network/announcethread.py 
b/src/tests/mock/pybitmessage/network/announcethread.py deleted file mode 100644 index e34ed963..00000000 --- a/src/tests/mock/pybitmessage/network/announcethread.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Announce myself (node address) -""" -import time - -import state -from bmconfigparser import BMConfigParser -from network.assemble import assemble_addr -from network.connectionpool import BMConnectionPool -from node import Peer -from threads import StoppableThread - - -class AnnounceThread(StoppableThread): - """A thread to manage regular announcing of this node""" - name = "Announcer" - announceInterval = 60 - - def run(self): - lastSelfAnnounced = 0 - while not self._stopped and state.shutdown == 0: - processed = 0 - if lastSelfAnnounced < time.time() - self.announceInterval: - self.announceSelf() - lastSelfAnnounced = time.time() - if processed == 0: - self.stop.wait(10) - - @staticmethod - def announceSelf(): - """Announce our presence""" - for connection in BMConnectionPool().udpSockets.values(): - if not connection.announcing: - continue - for stream in state.streamsInWhichIAmParticipating: - addr = ( - stream, - Peer( - '127.0.0.1', - BMConfigParser().safeGetInt( - 'bitmessagesettings', 'port')), - time.time()) - connection.append_write_buf(assemble_addr([addr])) diff --git a/src/tests/mock/pybitmessage/network/assemble.py b/src/tests/mock/pybitmessage/network/assemble.py deleted file mode 100644 index 599559a0..00000000 --- a/src/tests/mock/pybitmessage/network/assemble.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Create bitmessage protocol command packets -""" -import struct - -from pybitmessage import addresses -from pybitmessage.network.constants import MAX_ADDR_COUNT -from pybitmessage.network.node import Peer -from pybitmessage.protocol import CreatePacket, encodeHost - - -def assemble_addr(peerList): - """Create address command""" - if isinstance(peerList, Peer): - peerList = [peerList] - if not peerList: - return b'' - retval = b'' - for i in range(0, 
len(peerList), MAX_ADDR_COUNT): - payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT])) - for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]: - # 64-bit time - payload += struct.pack('>Q', timestamp) - payload += struct.pack('>I', stream) - # service bit flags offered by this node - payload += struct.pack('>q', 1) - payload += encodeHost(peer.host) - # remote port - payload += struct.pack('>H', peer.port) - retval += CreatePacket('addr', payload) - return retval diff --git a/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py b/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py deleted file mode 100644 index 2265ab3b..00000000 --- a/src/tests/mock/pybitmessage/network/asyncore_pollchoose.py +++ /dev/null @@ -1,1012 +0,0 @@ -""" -Basic infrastructure for asynchronous socket service clients and servers. -""" -# -*- Mode: Python -*- -# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp -# Author: Sam Rushing -# pylint: disable=too-many-branches,too-many-lines,global-statement -# pylint: disable=redefined-builtin,no-self-use -import os -import select -import socket -import sys -import time -import warnings -from errno import ( - EADDRINUSE, EAGAIN, EALREADY, EBADF, ECONNABORTED, ECONNREFUSED, - ECONNRESET, EHOSTUNREACH, EINPROGRESS, EINTR, EINVAL, EISCONN, ENETUNREACH, - ENOTCONN, ENOTSOCK, EPIPE, ESHUTDOWN, ETIMEDOUT, EWOULDBLOCK, errorcode -) -from threading import current_thread - -from pybitmessage import helper_random - -try: - from errno import WSAEWOULDBLOCK -except (ImportError, AttributeError): - WSAEWOULDBLOCK = EWOULDBLOCK -try: - from errno import WSAENOTSOCK -except (ImportError, AttributeError): - WSAENOTSOCK = ENOTSOCK -try: - from errno import WSAECONNRESET -except (ImportError, AttributeError): - WSAECONNRESET = ECONNRESET -try: - # Desirable side-effects on Windows; imports winsock error numbers - from errno import WSAEADDRINUSE # pylint: disable=unused-import -except (ImportError, AttributeError): - 
WSAEADDRINUSE = EADDRINUSE - - -_DISCONNECTED = frozenset(( - ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, EBADF, ECONNREFUSED, - EHOSTUNREACH, ENETUNREACH, ETIMEDOUT, WSAECONNRESET)) - -OP_READ = 1 -OP_WRITE = 2 - -try: - socket_map -except NameError: - socket_map = {} - - -def _strerror(err): - try: - return os.strerror(err) - except (ValueError, OverflowError, NameError): - if err in errorcode: - return errorcode[err] - return "Unknown error %s" % err - - -class ExitNow(Exception): - """We don't use directly but may be necessary as we replace - asyncore due to some library raising or expecting it""" - pass - - -_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit) - -maxDownloadRate = 0 -downloadTimestamp = 0 -downloadBucket = 0 -receivedBytes = 0 -maxUploadRate = 0 -uploadTimestamp = 0 -uploadBucket = 0 -sentBytes = 0 - - -def read(obj): - """Event to read from the object, i.e. its network socket.""" - - if not can_receive(): - return - try: - obj.handle_read_event() - except _reraised_exceptions: - raise - except BaseException: - obj.handle_error() - - -def write(obj): - """Event to write to the object, i.e. 
its network socket.""" - - if not can_send(): - return - try: - obj.handle_write_event() - except _reraised_exceptions: - raise - except BaseException: - obj.handle_error() - - -def set_rates(download, upload): - """Set throttling rates""" - - global maxDownloadRate, maxUploadRate, downloadBucket - global uploadBucket, downloadTimestamp, uploadTimestamp - - maxDownloadRate = float(download) * 1024 - maxUploadRate = float(upload) * 1024 - downloadBucket = maxDownloadRate - uploadBucket = maxUploadRate - downloadTimestamp = time.time() - uploadTimestamp = time.time() - - -def can_receive(): - """Predicate indicating whether the download throttle is in effect""" - - return maxDownloadRate == 0 or downloadBucket > 0 - - -def can_send(): - """Predicate indicating whether the upload throttle is in effect""" - - return maxUploadRate == 0 or uploadBucket > 0 - - -def update_received(download=0): - """Update the receiving throttle""" - - global receivedBytes, downloadBucket, downloadTimestamp - - currentTimestamp = time.time() - receivedBytes += download - if maxDownloadRate > 0: - bucketIncrease = \ - maxDownloadRate * (currentTimestamp - downloadTimestamp) - downloadBucket += bucketIncrease - if downloadBucket > maxDownloadRate: - downloadBucket = int(maxDownloadRate) - downloadBucket -= download - downloadTimestamp = currentTimestamp - - -def update_sent(upload=0): - """Update the sending throttle""" - - global sentBytes, uploadBucket, uploadTimestamp - - currentTimestamp = time.time() - sentBytes += upload - if maxUploadRate > 0: - bucketIncrease = maxUploadRate * (currentTimestamp - uploadTimestamp) - uploadBucket += bucketIncrease - if uploadBucket > maxUploadRate: - uploadBucket = int(maxUploadRate) - uploadBucket -= upload - uploadTimestamp = currentTimestamp - - -def _exception(obj): - """Handle exceptions as appropriate""" - - try: - obj.handle_expt_event() - except _reraised_exceptions: - raise - except BaseException: - obj.handle_error() - - -def readwrite(obj, 
flags): - """Read and write any pending data to/from the object""" - - try: - if flags & select.POLLIN and can_receive(): - obj.handle_read_event() - if flags & select.POLLOUT and can_send(): - obj.handle_write_event() - if flags & select.POLLPRI: - obj.handle_expt_event() - if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): - obj.handle_close() - except socket.error as e: - if e.args[0] not in _DISCONNECTED: - obj.handle_error() - else: - obj.handle_close() - except _reraised_exceptions: - raise - except BaseException: - obj.handle_error() - - -def select_poller(timeout=0.0, map=None): - """A poller which uses select(), available on most platforms.""" - - if map is None: - map = socket_map - if map: - r = [] - w = [] - e = [] - for fd, obj in list(map.items()): - is_r = obj.readable() - is_w = obj.writable() - if is_r: - r.append(fd) - # accepting sockets should not be writable - if is_w and not obj.accepting: - w.append(fd) - if is_r or is_w: - e.append(fd) - if [] == r == w == e: - time.sleep(timeout) - return - - try: - r, w, e = select.select(r, w, e, timeout) - except KeyboardInterrupt: - return - except socket.error as err: - if err.args[0] in (EBADF, EINTR): - return - except Exception as err: - if err.args[0] in (WSAENOTSOCK, ): - return - - for fd in helper_random.randomsample(r, len(r)): - obj = map.get(fd) - if obj is None: - continue - read(obj) - - for fd in helper_random.randomsample(w, len(w)): - obj = map.get(fd) - if obj is None: - continue - write(obj) - - for fd in e: - obj = map.get(fd) - if obj is None: - continue - _exception(obj) - else: - current_thread().stop.wait(timeout) - - -def poll_poller(timeout=0.0, map=None): - """A poller which uses poll(), available on most UNIXen.""" - - if map is None: - map = socket_map - if timeout is not None: - # timeout is in milliseconds - timeout = int(timeout * 1000) - try: - poll_poller.pollster - except AttributeError: - poll_poller.pollster = select.poll() - if map: - for fd, obj in 
list(map.items()): - flags = newflags = 0 - if obj.readable(): - flags |= select.POLLIN | select.POLLPRI - newflags |= OP_READ - else: - newflags &= ~ OP_READ - # accepting sockets should not be writable - if obj.writable() and not obj.accepting: - flags |= select.POLLOUT - newflags |= OP_WRITE - else: - newflags &= ~ OP_WRITE - if newflags != obj.poller_flags: - obj.poller_flags = newflags - try: - if obj.poller_registered: - poll_poller.pollster.modify(fd, flags) - else: - poll_poller.pollster.register(fd, flags) - obj.poller_registered = True - except IOError: - pass - try: - r = poll_poller.pollster.poll(timeout) - except KeyboardInterrupt: - r = [] - except socket.error as err: - if err.args[0] in (EBADF, WSAENOTSOCK, EINTR): - return - for fd, flags in helper_random.randomsample(r, len(r)): - obj = map.get(fd) - if obj is None: - continue - readwrite(obj, flags) - else: - current_thread().stop.wait(timeout) - - -# Aliases for backward compatibility -poll = select_poller -poll2 = poll3 = poll_poller - - -def epoll_poller(timeout=0.0, map=None): - """A poller which uses epoll(), supported on Linux 2.5.44 and newer.""" - - if map is None: - map = socket_map - try: - epoll_poller.pollster - except AttributeError: - epoll_poller.pollster = select.epoll() - if map: - for fd, obj in map.items(): - flags = newflags = 0 - if obj.readable(): - flags |= select.POLLIN | select.POLLPRI - newflags |= OP_READ - else: - newflags &= ~ OP_READ - # accepting sockets should not be writable - if obj.writable() and not obj.accepting: - flags |= select.POLLOUT - newflags |= OP_WRITE - else: - newflags &= ~ OP_WRITE - if newflags != obj.poller_flags: - obj.poller_flags = newflags - # Only check for exceptions if object was either readable - # or writable. 
- flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL - try: - if obj.poller_registered: - epoll_poller.pollster.modify(fd, flags) - else: - epoll_poller.pollster.register(fd, flags) - obj.poller_registered = True - except IOError: - pass - try: - r = epoll_poller.pollster.poll(timeout) - except IOError as e: - if e.errno != EINTR: - raise - r = [] - except select.error as err: - if err.args[0] != EINTR: - raise - r = [] - for fd, flags in helper_random.randomsample(r, len(r)): - obj = map.get(fd) - if obj is None: - continue - readwrite(obj, flags) - else: - current_thread().stop.wait(timeout) - - -def kqueue_poller(timeout=0.0, map=None): - """A poller which uses kqueue(), BSD specific.""" - # pylint: disable=no-member,too-many-statements - - if map is None: - map = socket_map - try: - kqueue_poller.pollster - except AttributeError: - kqueue_poller.pollster = select.kqueue() - if map: - updates = [] - selectables = 0 - for fd, obj in map.items(): - kq_filter = 0 - if obj.readable(): - kq_filter |= 1 - selectables += 1 - if obj.writable() and not obj.accepting: - kq_filter |= 2 - selectables += 1 - if kq_filter != obj.poller_filter: - # unlike other pollers, READ and WRITE aren't OR able but have - # to be set and checked separately - if kq_filter & 1 != obj.poller_filter & 1: - poller_flags = select.KQ_EV_ADD - if kq_filter & 1: - poller_flags |= select.KQ_EV_ENABLE - else: - poller_flags |= select.KQ_EV_DISABLE - updates.append( - select.kevent( - fd, filter=select.KQ_FILTER_READ, - flags=poller_flags)) - if kq_filter & 2 != obj.poller_filter & 2: - poller_flags = select.KQ_EV_ADD - if kq_filter & 2: - poller_flags |= select.KQ_EV_ENABLE - else: - poller_flags |= select.KQ_EV_DISABLE - updates.append( - select.kevent( - fd, filter=select.KQ_FILTER_WRITE, - flags=poller_flags)) - obj.poller_filter = kq_filter - - if not selectables: - # unlike other pollers, kqueue poll does not wait if there are no - # filters setup - current_thread().stop.wait(timeout) - 
return - - events = kqueue_poller.pollster.control(updates, selectables, timeout) - if len(events) > 1: - events = helper_random.randomsample(events, len(events)) - - for event in events: - fd = event.ident - obj = map.get(fd) - if obj is None: - continue - if event.flags & select.KQ_EV_ERROR: - _exception(obj) - continue - if event.flags & select.KQ_EV_EOF and event.data and event.fflags: - obj.handle_close() - continue - if event.filter == select.KQ_FILTER_READ: - read(obj) - if event.filter == select.KQ_FILTER_WRITE: - write(obj) - else: - current_thread().stop.wait(timeout) - - -def loop(timeout=30.0, use_poll=False, map=None, count=None, poller=None): - """Poll in a loop, until count or timeout is reached""" - - if map is None: - map = socket_map - if count is None: - count = True - # code which grants backward compatibility with "use_poll" - # argument which should no longer be used in favor of - # "poller" - - if poller is None: - if use_poll: - poller = poll_poller - elif hasattr(select, 'epoll'): - poller = epoll_poller - elif hasattr(select, 'kqueue'): - poller = kqueue_poller - elif hasattr(select, 'poll'): - poller = poll_poller - elif hasattr(select, 'select'): - poller = select_poller - - if timeout == 0: - deadline = 0 - else: - deadline = time.time() + timeout - while count: - # fill buckets first - update_sent() - update_received() - subtimeout = deadline - time.time() - if subtimeout <= 0: - break - # then poll - poller(subtimeout, map) - if isinstance(count, int): - count = count - 1 - - -class dispatcher(object): - """Dispatcher for socket objects""" - # pylint: disable=too-many-public-methods,too-many-instance-attributes - - debug = False - connected = False - accepting = False - connecting = False - closing = False - addr = None - ignore_log_types = frozenset(['warning']) - poller_registered = False - poller_flags = 0 - # don't do network IO with a smaller bucket than this - minTx = 1500 - - def __init__(self, sock=None, map=None): - if map is 
None: - self._map = socket_map - else: - self._map = map - - self._fileno = None - - if sock: - # Set to nonblocking just to make sure for cases where we - # get a socket from a blocking source. - sock.setblocking(0) - self.set_socket(sock, map) - self.connected = True - # The constructor no longer requires that the socket - # passed be connected. - try: - self.addr = sock.getpeername() - except socket.error as err: - if err.args[0] in (ENOTCONN, EINVAL): - # To handle the case where we got an unconnected - # socket. - self.connected = False - else: - # The socket is broken in some unknown way, alert - # the user and remove it from the map (to prevent - # polling of broken sockets). - self.del_channel(map) - raise - else: - self.socket = None - - def __repr__(self): - status = [self.__class__.__module__ + "." + self.__class__.__name__] - if self.accepting and self.addr: - status.append('listening') - elif self.connected: - status.append('connected') - if self.addr is not None: - try: - status.append('%s:%d' % self.addr) - except TypeError: - status.append(repr(self.addr)) - return '<%s at %#x>' % (' '.join(status), id(self)) - - __str__ = __repr__ - - def add_channel(self, map=None): - """Add a channel""" - # pylint: disable=attribute-defined-outside-init - if map is None: - map = self._map - map[self._fileno] = self - self.poller_flags = 0 - self.poller_filter = 0 - - def del_channel(self, map=None): - """Delete a channel""" - fd = self._fileno - if map is None: - map = self._map - if fd in map: - del map[fd] - if self._fileno: - try: - kqueue_poller.pollster.control([select.kevent( - fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0) - except(AttributeError, KeyError, TypeError, IOError, OSError): - pass - try: - kqueue_poller.pollster.control([select.kevent( - fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0) - except(AttributeError, KeyError, TypeError, IOError, OSError): - pass - try: - epoll_poller.pollster.unregister(fd) - except (AttributeError, 
KeyError, TypeError, IOError): - # no epoll used, or not registered - pass - try: - poll_poller.pollster.unregister(fd) - except (AttributeError, KeyError, TypeError, IOError): - # no poll used, or not registered - pass - self._fileno = None - self.poller_flags = 0 - self.poller_filter = 0 - self.poller_registered = False - - def create_socket( - self, family=socket.AF_INET, socket_type=socket.SOCK_STREAM): - """Create a socket""" - # pylint: disable=attribute-defined-outside-init - self.family_and_type = family, socket_type - sock = socket.socket(family, socket_type) - sock.setblocking(0) - self.set_socket(sock) - - def set_socket(self, sock, map=None): - """Set socket""" - self.socket = sock - self._fileno = sock.fileno() - self.add_channel(map) - - def set_reuse_addr(self): - """try to re-use a server port if possible""" - try: - self.socket.setsockopt( - socket.SOL_SOCKET, socket.SO_REUSEADDR, self.socket.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 - ) - except socket.error: - pass - - # ================================================== - # predicates for select() - # these are used as filters for the lists of sockets - # to pass to select(). - # ================================================== - - def readable(self): - """Predicate to indicate download throttle status""" - if maxDownloadRate > 0: - return downloadBucket > dispatcher.minTx - return True - - def writable(self): - """Predicate to indicate upload throttle status""" - if maxUploadRate > 0: - return uploadBucket > dispatcher.minTx - return True - - # ================================================== - # socket object methods. 
- # ================================================== - - def listen(self, num): - """Listen on a port""" - self.accepting = True - if os.name == 'nt' and num > 5: - num = 5 - return self.socket.listen(num) - - def bind(self, addr): - """Bind to an address""" - self.addr = addr - return self.socket.bind(addr) - - def connect(self, address): - """Connect to an address""" - self.connected = False - self.connecting = True - err = self.socket.connect_ex(address) - if err in (EINPROGRESS, EALREADY, EWOULDBLOCK, WSAEWOULDBLOCK) \ - or err == EINVAL and os.name in ('nt', 'ce'): - self.addr = address - return - if err in (0, EISCONN): - self.addr = address - self.handle_connect_event() - else: - raise socket.error(err, errorcode[err]) - - def accept(self): - """Accept incoming connections. - Returns either an address pair or None.""" - try: - conn, addr = self.socket.accept() - except TypeError: - return None - except socket.error as why: - if why.args[0] in ( - EWOULDBLOCK, WSAEWOULDBLOCK, ECONNABORTED, - EAGAIN, ENOTCONN): - return None - else: - raise - else: - return conn, addr - - def send(self, data): - """Send data""" - try: - result = self.socket.send(data) - return result - except socket.error as why: - if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK): - return 0 - elif why.args[0] in _DISCONNECTED: - self.handle_close() - return 0 - else: - raise - - def recv(self, buffer_size): - """Receive data""" - try: - data = self.socket.recv(buffer_size) - if not data: - # a closed connection is indicated by signaling - # a read condition, and having recv() return 0. 
- self.handle_close() - return b'' - return data - except socket.error as why: - # winsock sometimes raises ENOTCONN - if why.args[0] in (EAGAIN, EWOULDBLOCK, WSAEWOULDBLOCK): - return b'' - if why.args[0] in _DISCONNECTED: - self.handle_close() - return b'' - else: - raise - - def close(self): - """Close connection""" - self.connected = False - self.accepting = False - self.connecting = False - self.del_channel() - try: - self.socket.close() - except socket.error as why: - if why.args[0] not in (ENOTCONN, EBADF): - raise - - # cheap inheritance, used to pass all other attribute - # references to the underlying socket object. - def __getattr__(self, attr): - try: - retattr = getattr(self.socket, attr) - except AttributeError: - raise AttributeError( - "%s instance has no attribute '%s'" - % (self.__class__.__name__, attr)) - else: - msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s"\ - " instead" % {'me': self.__class__.__name__, 'attr': attr} - warnings.warn(msg, DeprecationWarning, stacklevel=2) - return retattr - - # log and log_info may be overridden to provide more sophisticated - # logging and warning methods. In general, log is for 'hit' logging - # and 'log_info' is for informational, warning and error logging. 
- - def log(self, message): - """Log a message to stderr""" - sys.stderr.write('log: %s\n' % str(message)) - - def log_info(self, message, log_type='info'): - """Conditionally print a message""" - if log_type not in self.ignore_log_types: - print('%s: %s' % (log_type, message)) - - def handle_read_event(self): - """Handle a read event""" - if self.accepting: - # accepting sockets are never connected, they "spawn" new - # sockets that are connected - self.handle_accept() - elif not self.connected: - if self.connecting: - self.handle_connect_event() - self.handle_read() - else: - self.handle_read() - - def handle_connect_event(self): - """Handle a connection event""" - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - self.handle_connect() - self.connected = True - self.connecting = False - - def handle_write_event(self): - """Handle a write event""" - if self.accepting: - # Accepting sockets shouldn't get a write event. - # We will pretend it didn't happen. - return - - if not self.connected: - if self.connecting: - self.handle_connect_event() - self.handle_write() - - def handle_expt_event(self): - """Handle expected exceptions""" - # handle_expt_event() is called if there might be an error on the - # socket, or if there is OOB data - # check for the error condition first - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - # we can get here when select.select() says that there is an - # exceptional condition on the socket - # since there is an error, we'll go ahead and close the socket - # like we would in a subclassed handle_read() that received no - # data - self.handle_close() - elif sys.platform.startswith("win"): - # async connect failed - self.handle_close() - else: - self.handle_expt() - - def handle_error(self): - """Handle unexpected exceptions""" - _, t, v, tbinfo = compact_traceback() - - # sometimes a user repr method will crash. 
- try: - self_repr = repr(self) - except BaseException: - self_repr = '<__repr__(self) failed for object at %0x>' % id(self) - - self.log_info( - 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( - self_repr, t, v, tbinfo), - 'error') - self.handle_close() - - def handle_accept(self): - """Handle an accept event""" - pair = self.accept() - if pair is not None: - self.handle_accepted(*pair) - - def handle_expt(self): - """Log that the subclass does not implement handle_expt""" - self.log_info('unhandled incoming priority event', 'warning') - - def handle_read(self): - """Log that the subclass does not implement handle_read""" - self.log_info('unhandled read event', 'warning') - - def handle_write(self): - """Log that the subclass does not implement handle_write""" - self.log_info('unhandled write event', 'warning') - - def handle_connect(self): - """Log that the subclass does not implement handle_connect""" - self.log_info('unhandled connect event', 'warning') - - def handle_accepted(self, sock, addr): - """Log that the subclass does not implement handle_accepted""" - sock.close() - self.log_info('unhandled accepted event on %s' % (addr), 'warning') - - def handle_close(self): - """Log that the subclass does not implement handle_close""" - self.log_info('unhandled close event', 'warning') - self.close() - - -class dispatcher_with_send(dispatcher): - """ - adds simple buffered output capability, useful for simple clients. 
- [for more sophisticated usage use asynchat.async_chat] - """ - - def __init__(self, sock=None, map=None): - dispatcher.__init__(self, sock, map) - self.out_buffer = b'' - - def initiate_send(self): - """Initiate a send""" - num_sent = 0 - num_sent = dispatcher.send(self, self.out_buffer[:512]) - self.out_buffer = self.out_buffer[num_sent:] - - def handle_write(self): - """Handle a write event""" - self.initiate_send() - - def writable(self): - """Predicate to indicate if the object is writable""" - return not self.connected or len(self.out_buffer) - - def send(self, data): - """Send data""" - if self.debug: - self.log_info('sending %s' % repr(data)) - self.out_buffer = self.out_buffer + data - self.initiate_send() - - -# --------------------------------------------------------------------------- -# used for debugging. -# --------------------------------------------------------------------------- - - -def compact_traceback(): - """Return a compact traceback""" - t, v, tb = sys.exc_info() - tbinfo = [] - # Must have a traceback - if not tb: - raise AssertionError("traceback does not exist") - while tb: - tbinfo.append(( - tb.tb_frame.f_code.co_filename, - tb.tb_frame.f_code.co_name, - str(tb.tb_lineno) - )) - tb = tb.tb_next - - # just to be safe - del tb - - filename, function, line = tbinfo[-1] - info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) - return (filename, function, line), t, v, info - - -def close_all(map=None, ignore_all=False): - """Close all connections""" - - if map is None: - map = socket_map - for x in list(map.values()): - try: - x.close() - except OSError as e: - if e.args[0] == EBADF: - pass - elif not ignore_all: - raise - except _reraised_exceptions: - raise - except BaseException: - if not ignore_all: - raise - map.clear() - - -# Asynchronous File I/O: -# -# After a little research (reading man pages on various unixen, and -# digging through the linux kernel), I've determined that select() -# isn't meant for doing asynchronous file i/o. 
-# Heartening, though - reading linux/mm/filemap.c shows that linux -# supports asynchronous read-ahead. So _MOST_ of the time, the data -# will be sitting in memory for us already when we go to read it. -# -# What other OS's (besides NT) support async file i/o? [VMS?] -# -# Regardless, this is useful for pipes, and stdin/stdout... - - -if os.name == 'posix': - import fcntl - - class file_wrapper: # pylint: disable=old-style-class - """ - Here we override just enough to make a file look - like a socket for the purposes of asyncore. - - The passed fd is automatically os.dup()'d - """ - - def __init__(self, fd): - self.fd = os.dup(fd) - - def recv(self, *args): - """Fake recv()""" - return os.read(self.fd, *args) - - def send(self, *args): - """Fake send()""" - return os.write(self.fd, *args) - - def getsockopt(self, level, optname, buflen=None): - """Fake getsockopt()""" - if (level == socket.SOL_SOCKET and optname == socket.SO_ERROR - and not buflen): - return 0 - raise NotImplementedError( - "Only asyncore specific behaviour implemented.") - - read = recv - write = send - - def close(self): - """Fake close()""" - os.close(self.fd) - - def fileno(self): - """Fake fileno()""" - return self.fd - - class file_dispatcher(dispatcher): - """A dispatcher for file_wrapper objects""" - - def __init__(self, fd, map=None): - dispatcher.__init__(self, None, map) - self.connected = True - try: - fd = fd.fileno() - except AttributeError: - pass - self.set_file(fd) - # set it to non-blocking mode - flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0) - flags = flags | os.O_NONBLOCK - fcntl.fcntl(fd, fcntl.F_SETFL, flags) - - def set_file(self, fd): - """Set file""" - self.socket = file_wrapper(fd) - self._fileno = self.socket.fileno() - self.add_channel() diff --git a/src/tests/mock/pybitmessage/network/bmobject.py b/src/tests/mock/pybitmessage/network/bmobject.py deleted file mode 100644 index 12b997d7..00000000 --- a/src/tests/mock/pybitmessage/network/bmobject.py +++ /dev/null @@ -1,164 
+0,0 @@ -""" -BMObject and it's exceptions. -""" -import logging -import time - -import protocol -import state -from addresses import calculateInventoryHash -from inventory import Inventory -from network.dandelion import Dandelion - -logger = logging.getLogger('default') - - -class BMObjectInsufficientPOWError(Exception): - """Exception indicating the object - doesn't have sufficient proof of work.""" - errorCodes = ("Insufficient proof of work") - - -class BMObjectInvalidDataError(Exception): - """Exception indicating the data being parsed - does not match the specification.""" - errorCodes = ("Data invalid") - - -class BMObjectExpiredError(Exception): - """Exception indicating the object's lifetime has expired.""" - errorCodes = ("Object expired") - - -class BMObjectUnwantedStreamError(Exception): - """Exception indicating the object is in a stream - we didn't advertise as being interested in.""" - errorCodes = ("Object in unwanted stream") - - -class BMObjectInvalidError(Exception): - """The object's data does not match object specification.""" - errorCodes = ("Invalid object") - - -class BMObjectAlreadyHaveError(Exception): - """We received a duplicate object (one we already have)""" - errorCodes = ("Already have this object") - - -class BMObject(object): # pylint: disable=too-many-instance-attributes - """Bitmessage Object as a class.""" - - # max TTL, 28 days and 3 hours - maxTTL = 28 * 24 * 60 * 60 + 10800 - # min TTL, 3 hour (in the past - minTTL = -3600 - - def __init__( - self, - nonce, - expiresTime, - objectType, - version, - streamNumber, - data, - payloadOffset - ): # pylint: disable=too-many-arguments - self.nonce = nonce - self.expiresTime = expiresTime - self.objectType = objectType - self.version = version - self.streamNumber = streamNumber - self.inventoryHash = calculateInventoryHash(data) - # copy to avoid memory issues - self.data = bytearray(data) - self.tag = self.data[payloadOffset:payloadOffset + 32] - - def 
checkProofOfWorkSufficient(self): - """Perform a proof of work check for sufficiency.""" - # Let us check to make sure that the proof of work is sufficient. - if not protocol.isProofOfWorkSufficient(self.data): - logger.info('Proof of work is insufficient.') - raise BMObjectInsufficientPOWError() - - def checkEOLSanity(self): - """Check if object's lifetime - isn't ridiculously far in the past or future.""" - # EOL sanity check - if self.expiresTime - int(time.time()) > BMObject.maxTTL: - logger.info( - 'This object\'s End of Life time is too far in the future.' - ' Ignoring it. Time is %i', self.expiresTime) - # .. todo:: remove from download queue - raise BMObjectExpiredError() - - if self.expiresTime - int(time.time()) < BMObject.minTTL: - logger.info( - 'This object\'s End of Life time was too long ago.' - ' Ignoring the object. Time is %i', self.expiresTime) - # .. todo:: remove from download queue - raise BMObjectExpiredError() - - def checkStream(self): - """Check if object's stream matches streams we are interested in""" - if self.streamNumber not in state.streamsInWhichIAmParticipating: - logger.debug( - 'The streamNumber %i isn\'t one we are interested in.', - self.streamNumber) - raise BMObjectUnwantedStreamError() - - def checkAlreadyHave(self): - """ - Check if we already have the object - (so that we don't duplicate it in inventory - or advertise it unnecessarily) - """ - # if it's a stem duplicate, pretend we don't have it - if Dandelion().hasHash(self.inventoryHash): - return - if self.inventoryHash in Inventory(): - raise BMObjectAlreadyHaveError() - - def checkObjectByType(self): - """Call a object type specific check - (objects can have additional checks based on their types)""" - if self.objectType == protocol.OBJECT_GETPUBKEY: - self.checkGetpubkey() - elif self.objectType == protocol.OBJECT_PUBKEY: - self.checkPubkey() - elif self.objectType == protocol.OBJECT_MSG: - self.checkMessage() - elif self.objectType == protocol.OBJECT_BROADCAST: - 
self.checkBroadcast() - # other objects don't require other types of tests - - def checkMessage(self): # pylint: disable=no-self-use - """"Message" object type checks.""" - return - - def checkGetpubkey(self): - """"Getpubkey" object type checks.""" - if len(self.data) < 42: - logger.info( - 'getpubkey message doesn\'t contain enough data. Ignoring.') - raise BMObjectInvalidError() - - def checkPubkey(self): - """"Pubkey" object type checks.""" - # sanity check - if len(self.data) < 146 or len(self.data) > 440: - logger.info('pubkey object too short or too long. Ignoring.') - raise BMObjectInvalidError() - - def checkBroadcast(self): - """"Broadcast" object type checks.""" - if len(self.data) < 180: - logger.debug( - 'The payload length of this broadcast' - ' packet is unreasonably low. Someone is probably' - ' trying funny business. Ignoring message.') - raise BMObjectInvalidError() - - # this isn't supported anymore - if self.version < 2: - raise BMObjectInvalidError() diff --git a/src/tests/mock/pybitmessage/network/bmproto.py b/src/tests/mock/pybitmessage/network/bmproto.py deleted file mode 100644 index 3d54b33c..00000000 --- a/src/tests/mock/pybitmessage/network/bmproto.py +++ /dev/null @@ -1,709 +0,0 @@ -""" -Class BMProto defines bitmessage's network protocol workflow. 
-""" - -import base64 -import hashlib -import logging -import re -import socket -import struct -import time -from binascii import hexlify - -from pybitmessage import addresses -import connectionpool -import knownnodes -from pybitmessage import protocol -from pybitmessage import state -from pybitmessage.bmconfigparser import BMConfigParser -from pybitmessage.inventory import Inventory -from pybitmessage.network.advanceddispatcher import AdvancedDispatcher -from pybitmessage.network.bmobject import ( - BMObject, BMObjectAlreadyHaveError, BMObjectExpiredError, - BMObjectInsufficientPOWError, BMObjectInvalidDataError, - BMObjectInvalidError, BMObjectUnwantedStreamError -) -from pybitmessage.network.constants import ( - ADDRESS_ALIVE, MAX_MESSAGE_SIZE, MAX_OBJECT_COUNT, - MAX_OBJECT_PAYLOAD_SIZE, MAX_TIME_OFFSET -) -from pybitmessage.network.dandelion import Dandelion -from pybitmessage.network.proxy import ProxyError -from node import Node, Peer -from objectracker import ObjectTracker, missingObjects -from pybitmessage.queues import invQueue, objectProcessorQueue, portCheckerQueue -from randomtrackingdict import RandomTrackingDict - -logger = logging.getLogger('default') - - -class BMProtoError(ProxyError): - """A Bitmessage Protocol Base Error""" - errorCodes = ("Protocol error") - - -class BMProtoInsufficientDataError(BMProtoError): - """A Bitmessage Protocol Insufficient Data Error""" - errorCodes = ("Insufficient data") - - -class BMProtoExcessiveDataError(BMProtoError): - """A Bitmessage Protocol Excessive Data Error""" - errorCodes = ("Too much data") - - -class BMProto(AdvancedDispatcher, ObjectTracker): - """A parser for the Bitmessage Protocol""" - # pylint: disable=too-many-instance-attributes, too-many-public-methods - timeOffsetWrongCount = 0 - - def __init__(self, address=None, sock=None): - # pylint: disable=unused-argument, super-init-not-called - AdvancedDispatcher.__init__(self, sock) - self.isOutbound = False - # packet/connection from a local IP - 
self.local = False - self.pendingUpload = RandomTrackingDict() - # canonical identifier of network group - self.network_group = None - # userAgent initialization - self.userAgent = '' - - def bm_proto_reset(self): - """Reset the bitmessage object parser""" - self.magic = None - self.command = None - self.payloadLength = 0 - self.checksum = None - self.payload = None - self.invalid = False - self.payloadOffset = 0 - self.expectBytes = protocol.Header.size - self.object = None - - def state_bm_header(self): - """Process incoming header""" - self.magic, self.command, self.payloadLength, self.checksum = \ - protocol.Header.unpack(self.read_buf[:protocol.Header.size]) - self.command = self.command.rstrip('\x00') - if self.magic != 0xE9BEB4D9: - # skip 1 byte in order to sync - self.set_state("bm_header", length=1) - self.bm_proto_reset() - logger.debug('Bad magic') - if self.socket.type == socket.SOCK_STREAM: - self.close_reason = "Bad magic" - self.set_state("close") - return False - if self.payloadLength > MAX_MESSAGE_SIZE: - self.invalid = True - self.set_state( - "bm_command", - length=protocol.Header.size, expectBytes=self.payloadLength) - return True - - def state_bm_command(self): # pylint: disable=too-many-branches - """Process incoming command""" - self.payload = self.read_buf[:self.payloadLength] - if self.checksum != hashlib.sha512(self.payload).digest()[0:4]: - logger.debug('Bad checksum, ignoring') - self.invalid = True - retval = True - if not self.fullyEstablished and self.command not in ( - "error", "version", "verack"): - logger.error( - 'Received command %s before connection was fully' - ' established, ignoring', self.command) - self.invalid = True - if not self.invalid: - try: - retval = getattr( - self, "bm_command_" + str(self.command).lower())() - except AttributeError: - # unimplemented command - logger.debug('unimplemented command %s', self.command) - except BMProtoInsufficientDataError: - logger.debug('packet length too short, skipping') - 
except BMProtoExcessiveDataError: - logger.debug('too much data, skipping') - except BMObjectInsufficientPOWError: - logger.debug('insufficient PoW, skipping') - except BMObjectInvalidDataError: - logger.debug('object invalid data, skipping') - except BMObjectExpiredError: - logger.debug('object expired, skipping') - except BMObjectUnwantedStreamError: - logger.debug('object not in wanted stream, skipping') - except BMObjectInvalidError: - logger.debug('object invalid, skipping') - except BMObjectAlreadyHaveError: - logger.debug( - '%(host)s:%(port)i already got object, skipping', - self.destination._asdict()) - except struct.error: - logger.debug('decoding error, skipping') - elif self.socket.type == socket.SOCK_DGRAM: - # broken read, ignore - pass - else: - logger.debug('Closing due to invalid command %s', self.command) - self.close_reason = "Invalid command %s" % self.command - self.set_state("close") - return False - if retval: - self.set_state("bm_header", length=self.payloadLength) - self.bm_proto_reset() - # else assume the command requires a different state to follow - return True - - def decode_payload_string(self, length): - """Read and return `length` bytes from payload""" - value = self.payload[self.payloadOffset:self.payloadOffset + length] - self.payloadOffset += length - return value - - def decode_payload_varint(self): - """Decode a varint from the payload""" - value, offset = addresses.decodeVarint( - self.payload[self.payloadOffset:]) - self.payloadOffset += offset - return value - - def decode_payload_node(self): - """Decode node details from the payload""" - # protocol.checkIPAddress() - services, host, port = self.decode_payload_content("Q16sH") - if host[0:12] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF': - host = socket.inet_ntop(socket.AF_INET, str(host[12:16])) - elif host[0:6] == '\xfd\x87\xd8\x7e\xeb\x43': - # Onion, based on BMD/bitcoind - host = base64.b32encode(host[6:]).lower() + ".onion" - else: - host = 
socket.inet_ntop(socket.AF_INET6, str(host)) - if host == "": - # This can happen on Windows systems which are not 64-bit - # compatible so let us drop the IPv6 address. - host = socket.inet_ntop(socket.AF_INET, str(host[12:16])) - - return Node(services, host, port) - - # pylint: disable=too-many-branches,too-many-statements - def decode_payload_content(self, pattern="v"): - """ - Decode the payload depending on pattern: - - L = varint indicating the length of the next array - l = varint indicating the length of the next item - v = varint (or array) - H = uint16 - I = uint32 - Q = uint64 - i = net_addr (without time and stream number) - s = string - 0-9 = length of the next item - , = end of array - """ - - def decode_simple(self, char="v"): - """Decode the payload using one char pattern""" - if char == "v": - return self.decode_payload_varint() - if char == "i": - return self.decode_payload_node() - if char == "H": - self.payloadOffset += 2 - return struct.unpack(">H", self.payload[ - self.payloadOffset - 2:self.payloadOffset])[0] - if char == "I": - self.payloadOffset += 4 - return struct.unpack(">I", self.payload[ - self.payloadOffset - 4:self.payloadOffset])[0] - if char == "Q": - self.payloadOffset += 8 - return struct.unpack(">Q", self.payload[ - self.payloadOffset - 8:self.payloadOffset])[0] - return None - - size = None - isArray = False - - # size - # iterator starting from size counting to 0 - # isArray? 
- # subpattern - # position of parser in subpattern - # retval (array) - parserStack = [[1, 1, False, pattern, 0, []]] - - while True: - i = parserStack[-1][3][parserStack[-1][4]] - if i in "0123456789" and ( - size is None or parserStack[-1][3][parserStack[-1][4] - 1] - not in "lL"): - try: - size = size * 10 + int(i) - except TypeError: - size = int(i) - isArray = False - elif i in "Ll" and size is None: - size = self.decode_payload_varint() - isArray = i == "L" - elif size is not None: - if isArray: - parserStack.append([ - size, size, isArray, - parserStack[-1][3][parserStack[-1][4]:], 0, [] - ]) - parserStack[-2][4] = len(parserStack[-2][3]) - else: - j = 0 - for j in range( - parserStack[-1][4], len(parserStack[-1][3])): - if parserStack[-1][3][j] not in "lL0123456789": - break - parserStack.append([ - size, size, isArray, - parserStack[-1][3][parserStack[-1][4]:j + 1], 0, [] - ]) - parserStack[-2][4] += len(parserStack[-1][3]) - 1 - size = None - continue - elif i == "s": - # if parserStack[-2][2]: - # parserStack[-1][5].append(self.payload[ - # self.payloadOffset:self.payloadOffset - # + parserStack[-1][0]]) - # else: - parserStack[-1][5] = self.payload[ - self.payloadOffset:self.payloadOffset + parserStack[-1][0]] - self.payloadOffset += parserStack[-1][0] - parserStack[-1][1] = 0 - parserStack[-1][2] = True - # del parserStack[-1] - size = None - elif i in "viHIQ": - parserStack[-1][5].append(decode_simple( - self, parserStack[-1][3][parserStack[-1][4]])) - size = None - else: - size = None - for depth in range(len(parserStack) - 1, -1, -1): - parserStack[depth][4] += 1 - if parserStack[depth][4] >= len(parserStack[depth][3]): - parserStack[depth][1] -= 1 - parserStack[depth][4] = 0 - if depth > 0: - if parserStack[depth][2]: - parserStack[depth - 1][5].append( - parserStack[depth][5]) - else: - parserStack[depth - 1][5].extend( - parserStack[depth][5]) - parserStack[depth][5] = [] - if parserStack[depth][1] <= 0: - if depth == 0: - # we're done, at depth 
0 counter is at 0 - # and pattern is done parsing - return parserStack[depth][5] - del parserStack[-1] - continue - break - break - if self.payloadOffset > self.payloadLength: - logger.debug( - 'Insufficient data %i/%i', - self.payloadOffset, self.payloadLength) - raise BMProtoInsufficientDataError() - - def bm_command_error(self): - """Decode an error message and log it""" - err_values = self.decode_payload_content("vvlsls") - fatalStatus = err_values[0] - # banTime = err_values[1] - # inventoryVector = err_values[2] - errorText = err_values[3] - logger.error( - '%s:%i error: %i, %s', self.destination.host, - self.destination.port, fatalStatus, errorText) - return True - - def bm_command_getdata(self): - """ - Incoming request for object(s). - If we have them and some other conditions are fulfilled, - append them to the write queue. - """ - items = self.decode_payload_content("l32s") - # skip? - now = time.time() - if now < self.skipUntil: - return True - for i in items: - self.pendingUpload[str(i)] = now - return True - - def _command_inv(self, dandelion=False): - """ - Common inv announce implementation: - both inv and dinv depending on *dandelion* kwarg - """ - items = self.decode_payload_content("l32s") - - if len(items) > MAX_OBJECT_COUNT: - logger.error( - 'Too many items in %sinv message!', 'd' if dandelion else '') - raise BMProtoExcessiveDataError() - - # ignore dinv if dandelion turned off - if dandelion and not state.dandelion: - return True - - for i in map(str, items): - if i in Inventory() and not Dandelion().hasHash(i): - continue - if dandelion and not Dandelion().hasHash(i): - Dandelion().addHash(i, self) - self.handleReceivedInventory(i) - - return True - - def bm_command_inv(self): - """Non-dandelion announce""" - return self._command_inv(False) - - def bm_command_dinv(self): - """Dandelion stem announce""" - return self._command_inv(True) - - def bm_command_object(self): - """Incoming object, process it""" - objectOffset = self.payloadOffset - 
nonce, expiresTime, objectType, version, streamNumber = \ - self.decode_payload_content("QQIvv") - self.object = BMObject( - nonce, expiresTime, objectType, version, streamNumber, - self.payload, self.payloadOffset) - - payload_len = len(self.payload) - self.payloadOffset - if payload_len > MAX_OBJECT_PAYLOAD_SIZE: - logger.info( - 'The payload length of this object is too large' - ' (%d bytes). Ignoring it.', payload_len) - raise BMProtoExcessiveDataError() - - try: - self.object.checkProofOfWorkSufficient() - self.object.checkEOLSanity() - self.object.checkAlreadyHave() - except (BMObjectExpiredError, BMObjectAlreadyHaveError, - BMObjectInsufficientPOWError): - BMProto.stopDownloadingObject(self.object.inventoryHash) - raise - try: - self.object.checkStream() - except BMObjectUnwantedStreamError: - acceptmismatch = BMConfigParser().get( - "inventory", "acceptmismatch") - BMProto.stopDownloadingObject( - self.object.inventoryHash, acceptmismatch) - if not acceptmismatch: - raise - - try: - self.object.checkObjectByType() - objectProcessorQueue.put(( - self.object.objectType, buffer(self.object.data))) - except BMObjectInvalidError: - BMProto.stopDownloadingObject(self.object.inventoryHash, True) - else: - try: - del missingObjects[self.object.inventoryHash] - except KeyError: - pass - - if self.object.inventoryHash in Inventory() and Dandelion().hasHash( - self.object.inventoryHash): - Dandelion().removeHash( - self.object.inventoryHash, "cycle detection") - - Inventory()[self.object.inventoryHash] = ( - self.object.objectType, self.object.streamNumber, - buffer(self.payload[objectOffset:]), self.object.expiresTime, - buffer(self.object.tag) - ) - self.handleReceivedObject( - self.object.streamNumber, self.object.inventoryHash) - invQueue.put(( - self.object.streamNumber, self.object.inventoryHash, - self.destination)) - return True - - def _decode_addr(self): - return self.decode_payload_content("LQIQ16sH") - - def bm_command_addr(self): - """Incoming addresses, 
process them""" - # not using services - for seenTime, stream, _, ip, port in self._decode_addr(): - ip = str(ip) - if ( - stream not in state.streamsInWhichIAmParticipating - # FIXME: should check against complete list - or ip.startswith('bootstrap') - ): - continue - decodedIP = protocol.checkIPAddress(ip) - if ( - decodedIP and time.time() - seenTime > 0 - and seenTime > time.time() - ADDRESS_ALIVE - and port > 0 - ): - peer = Peer(decodedIP, port) - - with knownnodes.knownNodesLock: - # isnew = - knownnodes.addKnownNode(stream, peer, seenTime) - - # since we don't track peers outside of knownnodes, - # only spread if in knownnodes to prevent flood - # DISABLED TO WORKAROUND FLOOD/LEAK - # if isnew: - # addrQueue.put(( - # stream, peer, seenTime, self.destination)) - return True - - def bm_command_portcheck(self): - """Incoming port check request, queue it.""" - portCheckerQueue.put(Peer(self.destination, self.peerNode.port)) - return True - - def bm_command_ping(self): - """Incoming ping, respond to it.""" - self.append_write_buf(protocol.CreatePacket('pong')) - return True - - @staticmethod - def bm_command_pong(): - """ - Incoming pong. - Ignore it. PyBitmessage pings connections after about 5 minutes - of inactivity, and leaves it to the TCP stack to handle actual - timeouts. So there is no need to do anything when a pong arrives. - """ - # nothing really - return True - - def bm_command_verack(self): - """ - Incoming verack. - If already sent my own verack, handshake is complete (except - potentially waiting for buffers to flush), so we can continue - to the main connection phase. If not sent verack yet, - continue processing. - """ - self.verackReceived = True - if not self.verackSent: - return True - self.set_state( - "tls_init" if self.isSSL else "connection_fully_established", - length=self.payloadLength, expectBytes=0) - return False - - def bm_command_version(self): - """ - Incoming version. 
- Parse and log, remember important things, like streams, bitfields, etc. - """ - decoded = self.decode_payload_content("IQQiiQlslv") - (self.remoteProtocolVersion, self.services, self.timestamp, - self.sockNode, self.peerNode, self.nonce, self.userAgent - ) = decoded[:7] - self.streams = decoded[7:] - self.nonce = struct.pack('>Q', self.nonce) - self.timeOffset = self.timestamp - int(time.time()) - logger.debug('remoteProtocolVersion: %i', self.remoteProtocolVersion) - logger.debug('services: 0x%08X', self.services) - logger.debug('time offset: %i', self.timeOffset) - logger.debug('my external IP: %s', self.sockNode.host) - logger.debug( - 'remote node incoming address: %s:%i', - self.destination.host, self.peerNode.port) - logger.debug('user agent: %s', self.userAgent) - logger.debug('streams: [%s]', ','.join(map(str, self.streams))) - if not self.peerValidityChecks(): - # ABORT afterwards - return True - self.append_write_buf(protocol.CreatePacket('verack')) - self.verackSent = True - ua_valid = re.match( - r'^/[a-zA-Z]+:[0-9]+\.?[\w\s\(\)\./:;-]*/$', self.userAgent) - if not ua_valid: - self.userAgent = '/INVALID:0/' - if not self.isOutbound: - self.append_write_buf(protocol.assembleVersionMessage( - self.destination.host, self.destination.port, - connectionpool.BMConnectionPool().streams, True, - nodeid=self.nodeid)) - logger.debug( - '%(host)s:%(port)i sending version', - self.destination._asdict()) - if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) - and protocol.haveSSL(not self.isOutbound)): - self.isSSL = True - if not self.verackReceived: - return True - self.set_state( - "tls_init" if self.isSSL else "connection_fully_established", - length=self.payloadLength, expectBytes=0) - return False - - # pylint: disable=too-many-return-statements - def peerValidityChecks(self): - """Check the validity of the peer""" - if self.remoteProtocolVersion < 3: - self.append_write_buf(protocol.assembleErrorMessage( - errorText="Your is using an old protocol. 
Closing connection.", - fatal=2)) - logger.debug( - 'Closing connection to old protocol version %s, node: %s', - self.remoteProtocolVersion, self.destination) - return False - if self.timeOffset > MAX_TIME_OFFSET: - self.append_write_buf(protocol.assembleErrorMessage( - errorText="Your time is too far in the future" - " compared to mine. Closing connection.", fatal=2)) - logger.info( - "%s's time is too far in the future (%s seconds)." - " Closing connection to it.", - self.destination, self.timeOffset) - BMProto.timeOffsetWrongCount += 1 - return False - elif self.timeOffset < -MAX_TIME_OFFSET: - self.append_write_buf(protocol.assembleErrorMessage( - errorText="Your time is too far in the past compared to mine." - " Closing connection.", fatal=2)) - logger.info( - "%s's time is too far in the past" - " (timeOffset %s seconds). Closing connection to it.", - self.destination, self.timeOffset) - BMProto.timeOffsetWrongCount += 1 - return False - else: - BMProto.timeOffsetWrongCount = 0 - if not self.streams: - self.append_write_buf(protocol.assembleErrorMessage( - errorText="We don't have shared stream interests." - " Closing connection.", fatal=2)) - logger.debug( - 'Closed connection to %s because there is no overlapping' - ' interest in streams.', self.destination) - return False - if connectionpool.BMConnectionPool().inboundConnections.get( - self.destination): - try: - if not protocol.checkSocksIP(self.destination.host): - self.append_write_buf(protocol.assembleErrorMessage( - errorText="Too many connections from your IP." 
- " Closing connection.", fatal=2)) - logger.debug( - 'Closed connection to %s because we are already' - ' connected to that IP.', self.destination) - return False - except Exception: # TODO: exception types - pass - if not self.isOutbound: - # incoming from a peer we're connected to as outbound, - # or server full report the same error to counter deanonymisation - if ( - Peer(self.destination.host, self.peerNode.port) - in connectionpool.BMConnectionPool().inboundConnections - or len(connectionpool.BMConnectionPool()) - > BMConfigParser().safeGetInt( - 'bitmessagesettings', 'maxtotalconnections') - + BMConfigParser().safeGetInt( - 'bitmessagesettings', 'maxbootstrapconnections') - ): - self.append_write_buf(protocol.assembleErrorMessage( - errorText="Server full, please try again later.", fatal=2)) - logger.debug( - 'Closed connection to %s due to server full' - ' or duplicate inbound/outbound.', self.destination) - return False - if connectionpool.BMConnectionPool().isAlreadyConnected(self.nonce): - self.append_write_buf(protocol.assembleErrorMessage( - errorText="I'm connected to myself. 
Closing connection.", - fatal=2)) - logger.debug( - "Closed connection to %s because I'm connected to myself.", - self.destination) - return False - - return True - - @staticmethod - def stopDownloadingObject(hashId, forwardAnyway=False): - """Stop downloading object *hashId*""" - for connection in connectionpool.BMConnectionPool().connections(): - try: - del connection.objectsNewToMe[hashId] - except KeyError: - pass - if not forwardAnyway: - try: - with connection.objectsNewToThemLock: - del connection.objectsNewToThem[hashId] - except KeyError: - pass - try: - del missingObjects[hashId] - except KeyError: - pass - - def handle_close(self): - """Handle close""" - self.set_state("close") - if not (self.accepting or self.connecting or self.connected): - # already disconnected - return - try: - logger.debug( - '%s:%i: closing, %s', self.destination.host, - self.destination.port, self.close_reason) - except AttributeError: - try: - logger.debug( - '%s:%i: closing', - self.destination.host, self.destination.port) - except AttributeError: - logger.debug('Disconnected socket closing') - AdvancedDispatcher.handle_close(self) - - -class BMStringParser(BMProto): - """ - A special case of BMProto used by objectProcessor to send ACK - """ - def __init__(self): - super(BMStringParser, self).__init__() - self.destination = Peer('127.0.0.1', 8444) - self.payload = None - ObjectTracker.__init__(self) - - def send_data(self, data): - """Send object given by the data string""" - # This class is introduced specially for ACK sending, please - # change log strings if you are going to use it for something else - self.bm_proto_reset() - self.payload = data - try: - self.bm_command_object() - except BMObjectAlreadyHaveError: - pass # maybe the same msg received on different nodes - except BMObjectExpiredError: - logger.debug( - 'Sending ACK failure (expired): %s', hexlify(data)) - except Exception as e: - logger.debug( - 'Exception of type %s while sending ACK', - type(e), 
exc_info=True) diff --git a/src/tests/mock/pybitmessage/network/connectionchooser.py b/src/tests/mock/pybitmessage/network/connectionchooser.py deleted file mode 100644 index edac86b7..00000000 --- a/src/tests/mock/pybitmessage/network/connectionchooser.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Select which node to connect to -""" -# pylint: disable=too-many-branches -import logging -import random # nosec - -import knownnodes -from pybitmessage import protocol -from pybitmessage import state -from pybitmessage.bmconfigparser import BMConfigParser -from pybitmessage.queues import queue, portCheckerQueue - -logger = logging.getLogger('default') - - -def getDiscoveredPeer(): - """Get a peer from the local peer discovery list""" - try: - peer = random.choice(state.discoveredPeers.keys()) - except (IndexError, KeyError): - raise ValueError - try: - del state.discoveredPeers[peer] - except KeyError: - pass - return peer - - -def chooseConnection(stream): - """Returns an appropriate connection""" - haveOnion = BMConfigParser().safeGet( - "bitmessagesettings", "socksproxytype")[0:5] == 'SOCKS' - onionOnly = BMConfigParser().safeGetBoolean( - "bitmessagesettings", "onionservicesonly") - try: - retval = portCheckerQueue.get(False) - portCheckerQueue.task_done() - return retval - except queue.Empty: - pass - # with a probability of 0.5, connect to a discovered peer - if random.choice((False, True)) and not haveOnion: - # discovered peers are already filtered by allowed streams - return getDiscoveredPeer() - for _ in range(50): - peer = random.choice(knownnodes.knownNodes[stream].keys()) - try: - peer_info = knownnodes.knownNodes[stream][peer] - if peer_info.get('self'): - continue - rating = peer_info["rating"] - except TypeError: - logger.warning('Error in %s', peer) - rating = 0 - if haveOnion: - # do not connect to raw IP addresses - # --keep all traffic within Tor overlay - if onionOnly and not peer.host.endswith('.onion'): - continue - # onion addresses have a higher 
priority when SOCKS - if peer.host.endswith('.onion') and rating > 0: - rating = 1 - # TODO: need better check - elif not peer.host.startswith('bootstrap'): - encodedAddr = protocol.encodeHost(peer.host) - # don't connect to local IPs when using SOCKS - if not protocol.checkIPAddress(encodedAddr, False): - continue - if rating > 1: - rating = 1 - try: - if 0.05 / (1.0 - rating) > random.random(): - return peer - except ZeroDivisionError: - return peer - raise ValueError diff --git a/src/tests/mock/pybitmessage/network/connectionpool.py b/src/tests/mock/pybitmessage/network/connectionpool.py deleted file mode 100644 index 4b67fa3c..00000000 --- a/src/tests/mock/pybitmessage/network/connectionpool.py +++ /dev/null @@ -1,405 +0,0 @@ -""" -`BMConnectionPool` class definition -""" -import errno -import logging -import re -import socket -import sys -import time - -import asyncore_pollchoose as asyncore -from pybitmessage import helper_random -import knownnodes -from pybitmessage import protocol -from pybitmessage import state -from pybitmessage.bmconfigparser import BMConfigParser -from connectionchooser import chooseConnection -from node import Peer -from proxy import Proxy -from singleton import Singleton -from tcp import ( - bootstrap, Socks4aBMConnection, Socks5BMConnection, - TCPConnection, TCPServer) -from udp import UDPSocket - -logger = logging.getLogger('default') - - -@Singleton -class BMConnectionPool(object): - """Pool of all existing connections""" - # pylint: disable=too-many-instance-attributes - - trustedPeer = None - """ - If the trustedpeer option is specified in keys.dat then this will - contain a Peer which will be connected to instead of using the - addresses advertised by other peers. - - The expected use case is where the user has a trusted server where - they run a Bitmessage daemon permanently. 
If they then run a second - instance of the client on a local machine periodically when they want - to check for messages it will sync with the network a lot faster - without compromising security. - """ - - def __init__(self): - asyncore.set_rates( - BMConfigParser().safeGetInt( - "bitmessagesettings", "maxdownloadrate"), - BMConfigParser().safeGetInt( - "bitmessagesettings", "maxuploadrate") - ) - self.outboundConnections = {} - self.inboundConnections = {} - self.listeningSockets = {} - self.udpSockets = {} - self.streams = [] - self._lastSpawned = 0 - self._spawnWait = 2 - self._bootstrapped = False - - trustedPeer = BMConfigParser().safeGet( - 'bitmessagesettings', 'trustedpeer') - try: - if trustedPeer: - host, port = trustedPeer.split(':') - self.trustedPeer = Peer(host, int(port)) - except ValueError: - sys.exit( - 'Bad trustedpeer config setting! It should be set as' - ' trustedpeer=:' - ) - - def __len__(self): - return len(self.outboundConnections) + len(self.inboundConnections) - - def connections(self): - """ - Shortcut for combined list of connections from - `inboundConnections` and `outboundConnections` dicts - """ - return self.inboundConnections.values() + self.outboundConnections.values() - - def establishedConnections(self): - """Shortcut for list of connections having fullyEstablished == True""" - return [ - x for x in self.connections() if x.fullyEstablished] - - def connectToStream(self, streamNumber): - """Connect to a bitmessage stream""" - self.streams.append(streamNumber) - state.streamsInWhichIAmParticipating.append(streamNumber) - - def getConnectionByAddr(self, addr): - """ - Return an (existing) connection object based on a `Peer` object - (IP and port) - """ - try: - return self.inboundConnections[addr] - except KeyError: - pass - try: - return self.inboundConnections[addr.host] - except (KeyError, AttributeError): - pass - try: - return self.outboundConnections[addr] - except KeyError: - pass - try: - return 
self.udpSockets[addr.host] - except (KeyError, AttributeError): - pass - raise KeyError - - def isAlreadyConnected(self, nodeid): - """Check if we're already connected to this peer""" - for i in self.connections(): - try: - if nodeid == i.nodeid: - return True - except AttributeError: - pass - return False - - def addConnection(self, connection): - """Add a connection object to our internal dict""" - if isinstance(connection, UDPSocket): - return - if connection.isOutbound: - self.outboundConnections[connection.destination] = connection - else: - if connection.destination.host in self.inboundConnections: - self.inboundConnections[connection.destination] = connection - else: - self.inboundConnections[connection.destination.host] = \ - connection - - def removeConnection(self, connection): - """Remove a connection from our internal dict""" - if isinstance(connection, UDPSocket): - del self.udpSockets[connection.listening.host] - elif isinstance(connection, TCPServer): - del self.listeningSockets[Peer( - connection.destination.host, connection.destination.port)] - elif connection.isOutbound: - try: - del self.outboundConnections[connection.destination] - except KeyError: - pass - else: - try: - del self.inboundConnections[connection.destination] - except KeyError: - try: - del self.inboundConnections[connection.destination.host] - except KeyError: - pass - connection.handle_close() - - @staticmethod - def getListeningIP(): - """What IP are we supposed to be listening on?""" - if BMConfigParser().safeGet( - "bitmessagesettings", "onionhostname").endswith(".onion"): - host = BMConfigParser().safeGet( - "bitmessagesettings", "onionbindip") - else: - host = '127.0.0.1' - if ( - BMConfigParser().safeGetBoolean("bitmessagesettings", "sockslisten") - or BMConfigParser().safeGet("bitmessagesettings", "socksproxytype") - == "none" - ): - # python doesn't like bind + INADDR_ANY? 
- # host = socket.INADDR_ANY - host = BMConfigParser().get("network", "bind") - return host - - def startListening(self, bind=None): - """Open a listening socket and start accepting connections on it""" - if bind is None: - bind = self.getListeningIP() - port = BMConfigParser().safeGetInt("bitmessagesettings", "port") - # correct port even if it changed - ls = TCPServer(host=bind, port=port) - self.listeningSockets[ls.destination] = ls - - def startUDPSocket(self, bind=None): - """ - Open an UDP socket. Depending on settings, it can either only - accept incoming UDP packets, or also be able to send them. - """ - if bind is None: - host = self.getListeningIP() - udpSocket = UDPSocket(host=host, announcing=True) - else: - if bind is False: - udpSocket = UDPSocket(announcing=False) - else: - udpSocket = UDPSocket(host=bind, announcing=True) - self.udpSockets[udpSocket.listening.host] = udpSocket - - def startBootstrappers(self): - """Run the process of resolving bootstrap hostnames""" - proxy_type = BMConfigParser().safeGet( - 'bitmessagesettings', 'socksproxytype') - # A plugins may be added here - hostname = None - if not proxy_type or proxy_type == 'none': - connection_base = TCPConnection - elif proxy_type == 'SOCKS5': - connection_base = Socks5BMConnection - hostname = helper_random.randomchoice([ - 'quzwelsuziwqgpt2.onion', None - ]) - elif proxy_type == 'SOCKS4a': - connection_base = Socks4aBMConnection # FIXME: I cannot test - else: - # This should never happen because socksproxytype setting - # is handled in bitmessagemain before starting the connectionpool - return - - bootstrapper = bootstrap(connection_base) - if not hostname: - port = helper_random.randomchoice([8080, 8444]) - hostname = 'bootstrap%s.bitmessage.org' % port - else: - port = 8444 - self.addConnection(bootstrapper(hostname, port)) - - def loop(self): # pylint: disable=too-many-branches,too-many-statements - """Main Connectionpool's loop""" - # pylint: disable=too-many-locals - # defaults to 
empty loop if outbound connections are maxed - spawnConnections = False - acceptConnections = True - if BMConfigParser().safeGetBoolean( - 'bitmessagesettings', 'dontconnect'): - acceptConnections = False - elif BMConfigParser().safeGetBoolean( - 'bitmessagesettings', 'sendoutgoingconnections'): - spawnConnections = True - socksproxytype = BMConfigParser().safeGet( - 'bitmessagesettings', 'socksproxytype', '') - onionsocksproxytype = BMConfigParser().safeGet( - 'bitmessagesettings', 'onionsocksproxytype', '') - if ( - socksproxytype[:5] == 'SOCKS' - and not BMConfigParser().safeGetBoolean( - 'bitmessagesettings', 'sockslisten') - and '.onion' not in BMConfigParser().safeGet( - 'bitmessagesettings', 'onionhostname', '') - ): - acceptConnections = False - - # pylint: disable=too-many-nested-blocks - if spawnConnections: - if not knownnodes.knownNodesActual: - self.startBootstrappers() - knownnodes.knownNodesActual = True - if not self._bootstrapped: - self._bootstrapped = True - Proxy.proxy = ( - BMConfigParser().safeGet( - 'bitmessagesettings', 'sockshostname'), - BMConfigParser().safeGetInt( - 'bitmessagesettings', 'socksport') - ) - # TODO AUTH - # TODO reset based on GUI settings changes - try: - if not onionsocksproxytype.startswith("SOCKS"): - raise ValueError - Proxy.onion_proxy = ( - BMConfigParser().safeGet( - 'network', 'onionsockshostname', None), - BMConfigParser().safeGet( - 'network', 'onionsocksport', None) - ) - except ValueError: - Proxy.onion_proxy = None - established = sum( - 1 for c in self.outboundConnections.values() - if (c.connected and c.fullyEstablished)) - pending = len(self.outboundConnections) - established - if established < BMConfigParser().safeGetInt( - 'bitmessagesettings', 'maxoutboundconnections'): - for i in range( - state.maximumNumberOfHalfOpenConnections - pending): - try: - chosen = self.trustedPeer or chooseConnection( - helper_random.randomchoice(self.streams)) - except ValueError: - continue - if chosen in 
self.outboundConnections: - continue - if chosen.host in self.inboundConnections: - continue - # don't connect to self - if chosen in state.ownAddresses: - continue - # don't connect to the hosts from the same - # network group, defense against sibyl attacks - host_network_group = protocol.network_group( - chosen.host) - same_group = False - for j in self.outboundConnections.values(): - if host_network_group == j.network_group: - same_group = True - if chosen.host == j.destination.host: - knownnodes.decreaseRating(chosen) - break - if same_group: - continue - - try: - if chosen.host.endswith(".onion") and Proxy.onion_proxy: - if onionsocksproxytype == "SOCKS5": - self.addConnection(Socks5BMConnection(chosen)) - elif onionsocksproxytype == "SOCKS4a": - self.addConnection(Socks4aBMConnection(chosen)) - elif socksproxytype == "SOCKS5": - self.addConnection(Socks5BMConnection(chosen)) - elif socksproxytype == "SOCKS4a": - self.addConnection(Socks4aBMConnection(chosen)) - else: - self.addConnection(TCPConnection(chosen)) - except socket.error as e: - if e.errno == errno.ENETUNREACH: - continue - - self._lastSpawned = time.time() - else: - for i in self.connections(): - # FIXME: rating will be increased after next connection - i.handle_close() - - if acceptConnections: - if not self.listeningSockets: - if BMConfigParser().safeGet('network', 'bind') == '': - self.startListening() - else: - for bind in re.sub( - r'[^\w.]+', ' ', - BMConfigParser().safeGet('network', 'bind') - ).split(): - self.startListening(bind) - logger.info('Listening for incoming connections.') - if not self.udpSockets: - if BMConfigParser().safeGet('network', 'bind') == '': - self.startUDPSocket() - else: - for bind in re.sub( - r'[^\w.]+', ' ', - BMConfigParser().safeGet('network', 'bind') - ).split(): - self.startUDPSocket(bind) - self.startUDPSocket(False) - logger.info('Starting UDP socket(s).') - else: - if self.listeningSockets: - for i in self.listeningSockets.values(): - i.close_reason = 
"Stopping listening" - i.accepting = i.connecting = i.connected = False - logger.info('Stopped listening for incoming connections.') - if self.udpSockets: - for i in self.udpSockets.values(): - i.close_reason = "Stopping UDP socket" - i.accepting = i.connecting = i.connected = False - logger.info('Stopped udp sockets.') - - loopTime = float(self._spawnWait) - if self._lastSpawned < time.time() - self._spawnWait: - loopTime = 2.0 - asyncore.loop(timeout=loopTime, count=1000) - - reaper = [] - for i in self.connections(): - minTx = time.time() - 20 - if i.fullyEstablished: - minTx -= 300 - 20 - if i.lastTx < minTx: - if i.fullyEstablished: - i.append_write_buf(protocol.CreatePacket('ping')) - else: - i.close_reason = "Timeout (%is)" % ( - time.time() - i.lastTx) - i.set_state("close") - for i in ( - self.connections() - + self.listeningSockets.values() + self.udpSockets.values() - ): - if not (i.accepting or i.connecting or i.connected): - reaper.append(i) - else: - try: - if i.state == "close": - reaper.append(i) - except AttributeError: - pass - for i in reaper: - self.removeConnection(i) diff --git a/src/tests/mock/pybitmessage/network/constants.py b/src/tests/mock/pybitmessage/network/constants.py deleted file mode 100644 index f8f4120f..00000000 --- a/src/tests/mock/pybitmessage/network/constants.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Network protocol constants -""" - - -#: address is online if online less than this many seconds ago -ADDRESS_ALIVE = 10800 -#: protocol specification says max 1000 addresses in one addr command -MAX_ADDR_COUNT = 1000 -#: ~1.6 MB which is the maximum possible size of an inv message. 
-MAX_MESSAGE_SIZE = 1600100 -#: 2**18 = 256kB is the maximum size of an object payload -MAX_OBJECT_PAYLOAD_SIZE = 2**18 -#: protocol specification says max 50000 objects in one inv command -MAX_OBJECT_COUNT = 50000 -#: maximum time offset -MAX_TIME_OFFSET = 3600 diff --git a/src/tests/mock/pybitmessage/network/dandelion.py b/src/tests/mock/pybitmessage/network/dandelion.py deleted file mode 100644 index 03f45bd7..00000000 --- a/src/tests/mock/pybitmessage/network/dandelion.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Dandelion class definition, tracks stages -""" -import logging -from collections import namedtuple -from random import choice, expovariate, sample -from threading import RLock -from time import time - -import connectionpool -import state -from queues import invQueue -from singleton import Singleton - -# randomise routes after 600 seconds -REASSIGN_INTERVAL = 600 - -# trigger fluff due to expiration -FLUFF_TRIGGER_FIXED_DELAY = 10 -FLUFF_TRIGGER_MEAN_DELAY = 30 - -MAX_STEMS = 2 - -Stem = namedtuple('Stem', ['child', 'stream', 'timeout']) - -logger = logging.getLogger('default') - - -@Singleton -class Dandelion: # pylint: disable=old-style-class - """Dandelion class for tracking stem/fluff stages.""" - def __init__(self): - # currently assignable child stems - self.stem = [] - # currently assigned parent <-> child mappings - self.nodeMap = {} - # currently existing objects in stem mode - self.hashMap = {} - # when to rerandomise routes - self.refresh = time() + REASSIGN_INTERVAL - self.lock = RLock() - - @staticmethod - def poissonTimeout(start=None, average=0): - """Generate deadline using Poisson distribution""" - if start is None: - start = time() - if average == 0: - average = FLUFF_TRIGGER_MEAN_DELAY - return start + expovariate(1.0 / average) + FLUFF_TRIGGER_FIXED_DELAY - - def addHash(self, hashId, source=None, stream=1): - """Add inventory vector to dandelion stem""" - if not state.dandelion: - return - with self.lock: - self.hashMap[hashId] = Stem( 
- self.getNodeStem(source), - stream, - self.poissonTimeout()) - - def setHashStream(self, hashId, stream=1): - """ - Update stream for inventory vector (as inv/dinv commands don't - include streams, we only learn this after receiving the object) - """ - with self.lock: - if hashId in self.hashMap: - self.hashMap[hashId] = Stem( - self.hashMap[hashId].child, - stream, - self.poissonTimeout()) - - def removeHash(self, hashId, reason="no reason specified"): - """Switch inventory vector from stem to fluff mode""" - if logger.isEnabledFor(logging.DEBUG): - logger.debug( - '%s entering fluff mode due to %s.', - ''.join('%02x' % ord(i) for i in hashId), reason) - with self.lock: - try: - del self.hashMap[hashId] - except KeyError: - pass - - def hasHash(self, hashId): - """Is inventory vector in stem mode?""" - return hashId in self.hashMap - - def objectChildStem(self, hashId): - """Child (i.e. next) node for an inventory vector during stem mode""" - return self.hashMap[hashId].child - - def maybeAddStem(self, connection): - """ - If we had too few outbound connections, add the current one to the - current stem list. Dandelion as designed by the authors should - always have two active stem child connections. - """ - # fewer than MAX_STEMS outbound connections at last reshuffle? - with self.lock: - if len(self.stem) < MAX_STEMS: - self.stem.append(connection) - for k in (k for k, v in self.nodeMap.iteritems() if v is None): - self.nodeMap[k] = connection - for k, v in { - k: v for k, v in self.hashMap.iteritems() - if v.child is None - }.iteritems(): - self.hashMap[k] = Stem( - connection, v.stream, self.poissonTimeout()) - invQueue.put((v.stream, k, v.child)) - - def maybeRemoveStem(self, connection): - """ - Remove current connection from the stem list (called e.g. when - a connection is closed). - """ - # is the stem active? 
- with self.lock: - if connection in self.stem: - self.stem.remove(connection) - # active mappings to pointing to the removed node - for k in ( - k for k, v in self.nodeMap.iteritems() - if v == connection - ): - self.nodeMap[k] = None - for k, v in { - k: v for k, v in self.hashMap.iteritems() - if v.child == connection - }.iteritems(): - self.hashMap[k] = Stem( - None, v.stream, self.poissonTimeout()) - - def pickStem(self, parent=None): - """ - Pick a random active stem, but not the parent one - (the one where an object came from) - """ - try: - # pick a random from available stems - stem = choice(range(len(self.stem))) - if self.stem[stem] == parent: - # one stem available and it's the parent - if len(self.stem) == 1: - return None - # else, pick the other one - return self.stem[1 - stem] - # all ok - return self.stem[stem] - except IndexError: - # no stems available - return None - - def getNodeStem(self, node=None): - """ - Return child stem node for a given parent stem node - (the mapping is static for about 10 minutes, then it reshuffles) - """ - with self.lock: - try: - return self.nodeMap[node] - except KeyError: - self.nodeMap[node] = self.pickStem(node) - return self.nodeMap[node] - - def expire(self): - """Switch expired objects from stem to fluff mode""" - with self.lock: - deadline = time() - toDelete = [ - [v.stream, k, v.child] for k, v in self.hashMap.iteritems() - if v.timeout < deadline - ] - - for row in toDelete: - self.removeHash(row[1], 'expiration') - invQueue.put(row) - return toDelete - - def reRandomiseStems(self): - """Re-shuffle stem mapping (parent <-> child pairs)""" - with self.lock: - try: - # random two connections - self.stem = sample( - connectionpool.BMConnectionPool( - ).outboundConnections.values(), MAX_STEMS) - # not enough stems available - except ValueError: - self.stem = connectionpool.BMConnectionPool( - ).outboundConnections.values() - self.nodeMap = {} - # hashMap stays to cater for pending stems - self.refresh = 
time() + REASSIGN_INTERVAL diff --git a/src/tests/mock/pybitmessage/network/downloadthread.py b/src/tests/mock/pybitmessage/network/downloadthread.py deleted file mode 100644 index 0ae83b5b..00000000 --- a/src/tests/mock/pybitmessage/network/downloadthread.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -`DownloadThread` class definition -""" -import time - -import addresses -import helper_random -import protocol -from dandelion import Dandelion -from inventory import Inventory -from network.connectionpool import BMConnectionPool -from objectracker import missingObjects -from threads import StoppableThread - - -class DownloadThread(StoppableThread): - """Thread-based class for downloading from connections""" - minPending = 200 - maxRequestChunk = 1000 - requestTimeout = 60 - cleanInterval = 60 - requestExpires = 3600 - - def __init__(self): - super(DownloadThread, self).__init__(name="Downloader") - self.lastCleaned = time.time() - - def cleanPending(self): - """Expire pending downloads eventually""" - deadline = time.time() - self.requestExpires - try: - toDelete = [ - k for k, v in missingObjects.iteritems() - if v < deadline] - except RuntimeError: - pass - else: - for i in toDelete: - del missingObjects[i] - self.lastCleaned = time.time() - - def run(self): - while not self._stopped: - requested = 0 - # Choose downloading peers randomly - connections = BMConnectionPool().establishedConnections() - helper_random.randomshuffle(connections) - requestChunk = max(int( - min(self.maxRequestChunk, len(missingObjects)) - / len(connections)), 1) if connections else 1 - - for i in connections: - now = time.time() - # avoid unnecessary delay - if i.skipUntil >= now: - continue - try: - request = i.objectsNewToMe.randomKeys(requestChunk) - except KeyError: - continue - payload = bytearray() - chunkCount = 0 - for chunk in request: - if chunk in Inventory() and not Dandelion().hasHash(chunk): - try: - del i.objectsNewToMe[chunk] - except KeyError: - pass - continue - 
payload.extend(chunk) - chunkCount += 1 - missingObjects[chunk] = now - if not chunkCount: - continue - payload[0:0] = addresses.encodeVarint(chunkCount) - i.append_write_buf(protocol.CreatePacket('getdata', payload)) - self.logger.debug( - '%s:%i Requesting %i objects', - i.destination.host, i.destination.port, chunkCount) - requested += chunkCount - if time.time() >= self.lastCleaned + self.cleanInterval: - self.cleanPending() - if not requested: - self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/network/http.py b/src/tests/mock/pybitmessage/network/http.py deleted file mode 100644 index d7a938fa..00000000 --- a/src/tests/mock/pybitmessage/network/http.py +++ /dev/null @@ -1,89 +0,0 @@ -import socket - -from advanceddispatcher import AdvancedDispatcher -import asyncore_pollchoose as asyncore -from proxy import ProxyError -from socks5 import Socks5Connection, Socks5Resolver -from socks4a import Socks4aConnection, Socks4aResolver - - -class HttpError(ProxyError): - pass - - -class HttpConnection(AdvancedDispatcher): - def __init__(self, host, path="/"): # pylint: disable=redefined-outer-name - AdvancedDispatcher.__init__(self) - self.path = path - self.destination = (host, 80) - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - self.connect(self.destination) - print("connecting in background to %s:%i" % self.destination) - - def state_init(self): - self.append_write_buf( - "GET %s HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n" % ( - self.path, self.destination[0])) - print("Sending %ib" % len(self.write_buf)) - self.set_state("http_request_sent", 0) - return False - - def state_http_request_sent(self): - if self.read_buf: - print("Received %ib" % len(self.read_buf)) - self.read_buf = b"" - if not self.connected: - self.set_state("close", 0) - return False - - -class Socks5HttpConnection(Socks5Connection, HttpConnection): - def __init__(self, host, path="/"): # pylint: disable=super-init-not-called, redefined-outer-name - self.path = path - 
Socks5Connection.__init__(self, address=(host, 80)) - - def state_socks_handshake_done(self): - HttpConnection.state_init(self) - return False - - -class Socks4aHttpConnection(Socks4aConnection, HttpConnection): - def __init__(self, host, path="/"): # pylint: disable=super-init-not-called, redefined-outer-name - Socks4aConnection.__init__(self, address=(host, 80)) - self.path = path - - def state_socks_handshake_done(self): - HttpConnection.state_init(self) - return False - - -if __name__ == "__main__": - # initial fill - for host in ("bootstrap8080.bitmessage.org", "bootstrap8444.bitmessage.org"): - proxy = Socks5Resolver(host=host) - while asyncore.socket_map: - print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map))) - asyncore.loop(timeout=1, count=1) - proxy.resolved() - - proxy = Socks4aResolver(host=host) - while asyncore.socket_map: - print("loop %s, len %i" % (proxy.state, len(asyncore.socket_map))) - asyncore.loop(timeout=1, count=1) - proxy.resolved() - - for host in ("bitmessage.org",): - direct = HttpConnection(host) - while asyncore.socket_map: - # print "loop, state = %s" % (direct.state) - asyncore.loop(timeout=1, count=1) - - proxy = Socks5HttpConnection(host) - while asyncore.socket_map: - # print "loop, state = %s" % (proxy.state) - asyncore.loop(timeout=1, count=1) - - proxy = Socks4aHttpConnection(host) - while asyncore.socket_map: - # print "loop, state = %s" % (proxy.state) - asyncore.loop(timeout=1, count=1) diff --git a/src/tests/mock/pybitmessage/network/httpd.py b/src/tests/mock/pybitmessage/network/httpd.py deleted file mode 100644 index b69ffa99..00000000 --- a/src/tests/mock/pybitmessage/network/httpd.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -src/network/httpd.py -======================= -""" -import asyncore -import socket - -from tls import TLSHandshake - - -class HTTPRequestHandler(asyncore.dispatcher): - """Handling HTTP request""" - response = """HTTP/1.0 200 OK\r - Date: Sun, 23 Oct 2016 18:02:00 GMT\r - Content-Type: 
text/html; charset=UTF-8\r - Content-Encoding: UTF-8\r - Content-Length: 136\r - Last-Modified: Wed, 08 Jan 2003 23:11:55 GMT\r - Server: Apache/1.3.3.7 (Unix) (Red-Hat/Linux)\r - ETag: "3f80f-1b6-3e1cb03b"\r - Accept-Ranges: bytes\r - Connection: close\r - \r - - - An Example Page - - - Hello World, this is a very simple HTML document. - - """ - - def __init__(self, sock): - if not hasattr(self, '_map'): - asyncore.dispatcher.__init__(self, sock) - self.inbuf = "" - self.ready = True - self.busy = False - self.respos = 0 - - def handle_close(self): - self.close() - - def readable(self): - return self.ready - - def writable(self): - return self.busy - - def handle_read(self): - self.inbuf += self.recv(8192) - if self.inbuf[-4:] == "\r\n\r\n": - self.busy = True - self.ready = False - self.inbuf = "" - elif self.inbuf == "": - pass - - def handle_write(self): - if self.busy and self.respos < len(HTTPRequestHandler.response): - written = 0 - written = self.send(HTTPRequestHandler.response[self.respos:65536]) - self.respos += written - elif self.busy: - self.busy = False - self.ready = True - self.close() - - -class HTTPSRequestHandler(HTTPRequestHandler, TLSHandshake): - """Handling HTTPS request""" - def __init__(self, sock): - if not hasattr(self, '_map'): - asyncore.dispatcher.__init__(self, sock) # pylint: disable=non-parent-init-called - # self.tlsDone = False - TLSHandshake.__init__( - self, - sock=sock, - certfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/cert.pem', - keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem', - server_side=True) - HTTPRequestHandler.__init__(self, sock) - - def handle_connect(self): - TLSHandshake.handle_connect(self) - - def handle_close(self): - if self.tlsDone: - HTTPRequestHandler.close(self) - else: - TLSHandshake.close(self) - - def readable(self): - if self.tlsDone: - return HTTPRequestHandler.readable(self) - return TLSHandshake.readable(self) - - def handle_read(self): - if self.tlsDone: - 
HTTPRequestHandler.handle_read(self) - else: - TLSHandshake.handle_read(self) - - def writable(self): - if self.tlsDone: - return HTTPRequestHandler.writable(self) - return TLSHandshake.writable(self) - - def handle_write(self): - if self.tlsDone: - HTTPRequestHandler.handle_write(self) - else: - TLSHandshake.handle_write(self) - - -class HTTPServer(asyncore.dispatcher): - """Handling HTTP Server""" - port = 12345 - - def __init__(self): - if not hasattr(self, '_map'): - asyncore.dispatcher.__init__(self) - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - self.set_reuse_addr() - self.bind(('127.0.0.1', HTTPServer.port)) - self.connections = 0 - self.listen(5) - - def handle_accept(self): - pair = self.accept() - if pair is not None: - sock, addr = pair - # print 'Incoming connection from %s' % repr(addr) - self.connections += 1 - # if self.connections % 1000 == 0: - # print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map)) - HTTPRequestHandler(sock) - - -class HTTPSServer(HTTPServer): - """Handling HTTPS Server""" - port = 12345 - - def __init__(self): - if not hasattr(self, '_map'): - HTTPServer.__init__(self) - - def handle_accept(self): - pair = self.accept() - if pair is not None: - sock, addr = pair - # print 'Incoming connection from %s' % repr(addr) - self.connections += 1 - # if self.connections % 1000 == 0: - # print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map)) - HTTPSRequestHandler(sock) - - -if __name__ == "__main__": - client = HTTPSServer() - asyncore.loop() diff --git a/src/tests/mock/pybitmessage/network/https.py b/src/tests/mock/pybitmessage/network/https.py deleted file mode 100644 index a7b8b57c..00000000 --- a/src/tests/mock/pybitmessage/network/https.py +++ /dev/null @@ -1,71 +0,0 @@ -import asyncore - -from http import HTTPClient -from tls import TLSHandshake - -""" -self.sslSock = ssl.wrap_socket( - self.sock, - keyfile=os.path.join(paths.codePath(), 'sslkeys', 
'key.pem'), - certfile=os.path.join(paths.codePath(), 'sslkeys', 'cert.pem'), - server_side=not self.initiatedConnection, - ssl_version=ssl.PROTOCOL_TLSv1, - do_handshake_on_connect=False, - ciphers='AECDH-AES256-SHA') -""" - - -class HTTPSClient(HTTPClient, TLSHandshake): - def __init__(self, host, path): - if not hasattr(self, '_map'): - asyncore.dispatcher.__init__(self) - self.tlsDone = False - """ - TLSHandshake.__init__( - self, - address=(host, 443), - certfile='/home/shurdeek/src/PyBitmessage/sslsrc/keys/cert.pem', - keyfile='/home/shurdeek/src/PyBitmessage/src/sslkeys/key.pem', - server_side=False, - ciphers='AECDH-AES256-SHA') - """ - HTTPClient.__init__(self, host, path, connect=False) - TLSHandshake.__init__(self, address=(host, 443), server_side=False) - - def handle_connect(self): - TLSHandshake.handle_connect(self) - - def handle_close(self): - if self.tlsDone: - HTTPClient.close(self) - else: - TLSHandshake.close(self) - - def readable(self): - if self.tlsDone: - return HTTPClient.readable(self) - else: - return TLSHandshake.readable(self) - - def handle_read(self): - if self.tlsDone: - HTTPClient.handle_read(self) - else: - TLSHandshake.handle_read(self) - - def writable(self): - if self.tlsDone: - return HTTPClient.writable(self) - else: - return TLSHandshake.writable(self) - - def handle_write(self): - if self.tlsDone: - HTTPClient.handle_write(self) - else: - TLSHandshake.handle_write(self) - - -if __name__ == "__main__": - client = HTTPSClient('anarchy.economicsofbitcoin.com', '/') - asyncore.loop() diff --git a/src/tests/mock/pybitmessage/network/invthread.py b/src/tests/mock/pybitmessage/network/invthread.py deleted file mode 100644 index e68b7692..00000000 --- a/src/tests/mock/pybitmessage/network/invthread.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -Thread to send inv annoucements -""" -import Queue -import random -from time import time - -import addresses -import protocol -import state -from network.connectionpool import BMConnectionPool 
-from network.dandelion import Dandelion -from queues import invQueue -from threads import StoppableThread - - -def handleExpiredDandelion(expired): - """For expired dandelion objects, mark all remotes as not having - the object""" - if not expired: - return - for i in BMConnectionPool().connections(): - if not i.fullyEstablished: - continue - for x in expired: - streamNumber, hashid, _ = x - try: - del i.objectsNewToMe[hashid] - except KeyError: - if streamNumber in i.streams: - with i.objectsNewToThemLock: - i.objectsNewToThem[hashid] = time() - - -class InvThread(StoppableThread): - """Main thread that sends inv annoucements""" - - name = "InvBroadcaster" - - @staticmethod - def handleLocallyGenerated(stream, hashId): - """Locally generated inventory items require special handling""" - Dandelion().addHash(hashId, stream=stream) - for connection in BMConnectionPool().connections(): - if state.dandelion and connection != \ - Dandelion().objectChildStem(hashId): - continue - connection.objectsNewToThem[hashId] = time() - - def run(self): # pylint: disable=too-many-branches - while not state.shutdown: # pylint: disable=too-many-nested-blocks - chunk = [] - while True: - # Dandelion fluff trigger by expiration - handleExpiredDandelion(Dandelion().expire()) - try: - data = invQueue.get(False) - chunk.append((data[0], data[1])) - # locally generated - if len(data) == 2 or data[2] is None: - self.handleLocallyGenerated(data[0], data[1]) - except Queue.Empty: - break - - if chunk: - for connection in BMConnectionPool().connections(): - fluffs = [] - stems = [] - for inv in chunk: - if inv[0] not in connection.streams: - continue - try: - with connection.objectsNewToThemLock: - del connection.objectsNewToThem[inv[1]] - except KeyError: - continue - try: - if connection == Dandelion().objectChildStem(inv[1]): - # Fluff trigger by RNG - # auto-ignore if config set to 0, i.e. 
dandelion is off - if random.randint(1, 100) >= state.dandelion: - fluffs.append(inv[1]) - # send a dinv only if the stem node supports dandelion - elif connection.services & protocol.NODE_DANDELION > 0: - stems.append(inv[1]) - else: - fluffs.append(inv[1]) - except KeyError: - fluffs.append(inv[1]) - - if fluffs: - random.shuffle(fluffs) - connection.append_write_buf(protocol.CreatePacket( - 'inv', - addresses.encodeVarint( - len(fluffs)) + ''.join(fluffs))) - if stems: - random.shuffle(stems) - connection.append_write_buf(protocol.CreatePacket( - 'dinv', - addresses.encodeVarint( - len(stems)) + ''.join(stems))) - - invQueue.iterate() - for _ in range(len(chunk)): - invQueue.task_done() - - if Dandelion().refresh < time(): - Dandelion().reRandomiseStems() - - self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/network/knownnodes.py b/src/tests/mock/pybitmessage/network/knownnodes.py deleted file mode 100644 index 77a01fcc..00000000 --- a/src/tests/mock/pybitmessage/network/knownnodes.py +++ /dev/null @@ -1,269 +0,0 @@ -""" -Manipulations with knownNodes dictionary. -""" -# TODO: knownnodes object maybe? 
-# pylint: disable=global-statement - -import json -import logging -import os -import pickle -import threading -import time -try: - from collections.abc import Iterable -except ImportError: - from collections import Iterable - -from pybitmessage import state -from pybitmessage.bmconfigparser import BMConfigParser -from pybitmessage.network.node import Peer - -state.Peer = Peer - -knownNodesLock = threading.RLock() -"""Thread lock for knownnodes modification""" -knownNodes = {stream: {} for stream in range(1, 4)} -"""The dict of known nodes for each stream""" - -knownNodesTrimAmount = 2000 -"""trim stream knownnodes dict to this length""" - -knownNodesForgetRating = -0.5 -"""forget a node after rating is this low""" - -knownNodesActual = False - -logger = logging.getLogger('default') - -DEFAULT_NODES = ( - Peer('5.45.99.75', 8444), - Peer('75.167.159.54', 8444), - Peer('95.165.168.168', 8444), - Peer('85.180.139.241', 8444), - Peer('158.222.217.190', 8080), - Peer('178.62.12.187', 8448), - Peer('24.188.198.204', 8111), - Peer('109.147.204.113', 1195), - Peer('178.11.46.221', 8444) -) - - -def json_serialize_knownnodes(output): - """ - Reorganize knownnodes dict and write it as JSON to output - """ - _serialized = [] - for stream, peers in knownNodes.iteritems(): - for peer, info in peers.iteritems(): - info.update(rating=round(info.get('rating', 0), 2)) - _serialized.append({ - 'stream': stream, 'peer': peer._asdict(), 'info': info - }) - json.dump(_serialized, output, indent=4) - - -def json_deserialize_knownnodes(source): - """ - Read JSON from source and make knownnodes dict - """ - global knownNodesActual - for node in json.load(source): - peer = node['peer'] - info = node['info'] - peer = Peer(str(peer['host']), peer.get('port', 8444)) - knownNodes[node['stream']][peer] = info - if not (knownNodesActual - or info.get('self')) and peer not in DEFAULT_NODES: - knownNodesActual = True - - -def pickle_deserialize_old_knownnodes(source): - """ - Unpickle source and 
reorganize knownnodes dict if it has old format - the old format was {Peer:lastseen, ...} - the new format is {Peer:{"lastseen":i, "rating":f}} - """ - global knownNodes - knownNodes = pickle.load(source) - for stream in knownNodes.keys(): - for node, params in knownNodes[stream].iteritems(): - if isinstance(params, (float, int)): - addKnownNode(stream, node, params) - - -def saveKnownNodes(dirName=None): - """Save knownnodes to filesystem""" - if dirName is None: - dirName = state.appdata - with knownNodesLock: - with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output: - json_serialize_knownnodes(output) - - -def addKnownNode(stream, peer, lastseen=None, is_self=False): - """ - Add a new node to the dict or update lastseen if it already exists. - Do it for each stream number if *stream* is `Iterable`. - Returns True if added a new node. - """ - # pylint: disable=too-many-branches - if isinstance(stream, Iterable): - with knownNodesLock: - for s in stream: - addKnownNode(s, peer, lastseen, is_self) - return - - rating = 0.0 - if not lastseen: - # FIXME: maybe about 28 days? 
- lastseen = int(time.time()) - else: - lastseen = int(lastseen) - try: - info = knownNodes[stream].get(peer) - if lastseen > info['lastseen']: - info['lastseen'] = lastseen - except (KeyError, TypeError): - pass - else: - return - - if not is_self: - if len(knownNodes[stream]) > BMConfigParser().safeGetInt( - "knownnodes", "maxnodes"): - return - - knownNodes[stream][peer] = { - 'lastseen': lastseen, - 'rating': rating or 1 if is_self else 0, - 'self': is_self, - } - return True - - -def createDefaultKnownNodes(): - """Creating default Knownnodes""" - past = time.time() - 2418600 # 28 days - 10 min - for peer in DEFAULT_NODES: - addKnownNode(1, peer, past) - saveKnownNodes() - - -def readKnownNodes(): - """Load knownnodes from filesystem""" - try: - with open(state.appdata + 'knownnodes.dat', 'rb') as source: - with knownNodesLock: - try: - json_deserialize_knownnodes(source) - except ValueError: - source.seek(0) - pickle_deserialize_old_knownnodes(source) - except (IOError, OSError, KeyError, EOFError): - logger.debug( - 'Failed to read nodes from knownnodes.dat', exc_info=True) - createDefaultKnownNodes() - - config = BMConfigParser() - - # your own onion address, if setup - onionhostname = config.safeGet('bitmessagesettings', 'onionhostname') - if onionhostname and ".onion" in onionhostname: - onionport = config.safeGetInt('bitmessagesettings', 'onionport') - if onionport: - self_peer = Peer(onionhostname, onionport) - addKnownNode(1, self_peer, is_self=True) - state.ownAddresses[self_peer] = True - - -def increaseRating(peer): - """Increase rating of a peer node""" - increaseAmount = 0.1 - maxRating = 1 - with knownNodesLock: - for stream in knownNodes.keys(): - try: - knownNodes[stream][peer]["rating"] = min( - knownNodes[stream][peer]["rating"] + increaseAmount, - maxRating - ) - except KeyError: - pass - - -def decreaseRating(peer): - """Decrease rating of a peer node""" - decreaseAmount = 0.1 - minRating = -1 - with knownNodesLock: - for stream in 
knownNodes.keys(): - try: - knownNodes[stream][peer]["rating"] = max( - knownNodes[stream][peer]["rating"] - decreaseAmount, - minRating - ) - except KeyError: - pass - - -def trimKnownNodes(recAddrStream=1): - """Triming Knownnodes""" - if len(knownNodes[recAddrStream]) < \ - BMConfigParser().safeGetInt("knownnodes", "maxnodes"): - return - with knownNodesLock: - oldestList = sorted( - knownNodes[recAddrStream], - key=lambda x: x['lastseen'] - )[:knownNodesTrimAmount] - for oldest in oldestList: - del knownNodes[recAddrStream][oldest] - - -def dns(): - """Add DNS names to knownnodes""" - for port in [8080, 8444]: - addKnownNode( - 1, Peer('bootstrap%s.bitmessage.org' % port, port)) - - -def cleanupKnownNodes(): - """ - Cleanup knownnodes: remove old nodes and nodes with low rating - """ - global knownNodesActual - now = int(time.time()) - needToWriteKnownNodesToDisk = False - - with knownNodesLock: - for stream in knownNodes: - if stream not in state.streamsInWhichIAmParticipating: - continue - keys = knownNodes[stream].keys() - for node in keys: - if len(knownNodes[stream]) <= 1: # leave at least one node - if stream == 1: - knownNodesActual = False - break - try: - age = now - knownNodes[stream][node]["lastseen"] - # scrap old nodes (age > 28 days) - if age > 2419200: - needToWriteKnownNodesToDisk = True - del knownNodes[stream][node] - continue - # scrap old nodes (age > 3 hours) with low rating - if (age > 10800 and knownNodes[stream][node]["rating"] - <= knownNodesForgetRating): - needToWriteKnownNodesToDisk = True - del knownNodes[stream][node] - continue - except TypeError: - logger.warning('Error in %s', node) - keys = [] - - # Let us write out the knowNodes to disk - # if there is anything new to write out. 
- if needToWriteKnownNodesToDisk: - saveKnownNodes() diff --git a/src/tests/mock/pybitmessage/network/networkthread.py b/src/tests/mock/pybitmessage/network/networkthread.py deleted file mode 100644 index 61ff6c09..00000000 --- a/src/tests/mock/pybitmessage/network/networkthread.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -A thread to handle network concerns -""" -import network.asyncore_pollchoose as asyncore -import state -from network.connectionpool import BMConnectionPool -from queues import excQueue -from threads import StoppableThread - - -class BMNetworkThread(StoppableThread): - """Main network thread""" - name = "Asyncore" - - def run(self): - try: - while not self._stopped and state.shutdown == 0: - BMConnectionPool().loop() - except Exception as e: - excQueue.put((self.name, e)) - raise - - def stopThread(self): - super(BMNetworkThread, self).stopThread() - for i in BMConnectionPool().listeningSockets.values(): - try: - i.close() - except: - pass - for i in BMConnectionPool().outboundConnections.values(): - try: - i.close() - except: - pass - for i in BMConnectionPool().inboundConnections.values(): - try: - i.close() - except: - pass - - # just in case - asyncore.close_all() diff --git a/src/tests/mock/pybitmessage/network/node.py b/src/tests/mock/pybitmessage/network/node.py deleted file mode 100644 index 4c532b81..00000000 --- a/src/tests/mock/pybitmessage/network/node.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -Named tuples representing the network peers -""" -import collections - -Peer = collections.namedtuple('Peer', ['host', 'port']) -Node = collections.namedtuple('Node', ['services', 'host', 'port']) diff --git a/src/tests/mock/pybitmessage/network/objectracker.py b/src/tests/mock/pybitmessage/network/objectracker.py deleted file mode 100644 index 65e06de4..00000000 --- a/src/tests/mock/pybitmessage/network/objectracker.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Module for tracking objects -""" -import time -from threading import RLock - -import 
network.connectionpool -from network.dandelion import Dandelion -from randomtrackingdict import RandomTrackingDict - -haveBloom = False - -try: - # pybloomfiltermmap - from pybloomfilter import BloomFilter - haveBloom = True -except ImportError: - try: - # pybloom - from pybloom import BloomFilter - haveBloom = True - except ImportError: - pass - -# it isn't actually implemented yet so no point in turning it on -haveBloom = False - -# tracking pending downloads globally, for stats -missingObjects = {} - - -class ObjectTracker(object): - """Object tracker mixin""" - invCleanPeriod = 300 - invInitialCapacity = 50000 - invErrorRate = 0.03 - trackingExpires = 3600 - initialTimeOffset = 60 - - def __init__(self): - self.objectsNewToMe = RandomTrackingDict() - self.objectsNewToThem = {} - self.objectsNewToThemLock = RLock() - self.initInvBloom() - self.initAddrBloom() - self.lastCleaned = time.time() - - def initInvBloom(self): - """Init bloom filter for tracking. WIP.""" - if haveBloom: - # lock? - self.invBloom = BloomFilter( - capacity=ObjectTracker.invInitialCapacity, - error_rate=ObjectTracker.invErrorRate) - - def initAddrBloom(self): - """Init bloom filter for tracking addrs, WIP. - This either needs to be moved to addrthread.py or removed.""" - if haveBloom: - # lock? 
- self.addrBloom = BloomFilter( - capacity=ObjectTracker.invInitialCapacity, - error_rate=ObjectTracker.invErrorRate) - - def clean(self): - """Clean up tracking to prevent memory bloat""" - if self.lastCleaned < time.time() - ObjectTracker.invCleanPeriod: - if haveBloom: - if missingObjects == 0: - self.initInvBloom() - self.initAddrBloom() - else: - # release memory - deadline = time.time() - ObjectTracker.trackingExpires - with self.objectsNewToThemLock: - self.objectsNewToThem = { - k: v - for k, v in self.objectsNewToThem.iteritems() - if v >= deadline} - self.lastCleaned = time.time() - - def hasObj(self, hashid): - """Do we already have object?""" - if haveBloom: - return hashid in self.invBloom - return hashid in self.objectsNewToMe - - def handleReceivedInventory(self, hashId): - """Handling received inventory""" - if haveBloom: - self.invBloom.add(hashId) - try: - with self.objectsNewToThemLock: - del self.objectsNewToThem[hashId] - except KeyError: - pass - if hashId not in missingObjects: - missingObjects[hashId] = time.time() - self.objectsNewToMe[hashId] = True - - def handleReceivedObject(self, streamNumber, hashid): - """Handling received object""" - for i in network.connectionpool.BMConnectionPool().connections(): - if not i.fullyEstablished: - continue - try: - del i.objectsNewToMe[hashid] - except KeyError: - if streamNumber in i.streams and ( - not Dandelion().hasHash(hashid) - or Dandelion().objectChildStem(hashid) == i): - with i.objectsNewToThemLock: - i.objectsNewToThem[hashid] = time.time() - # update stream number, - # which we didn't have when we just received the dinv - # also resets expiration of the stem mode - Dandelion().setHashStream(hashid, streamNumber) - - if i == self: - try: - with i.objectsNewToThemLock: - del i.objectsNewToThem[hashid] - except KeyError: - pass - self.objectsNewToMe.setLastObject() - - def hasAddr(self, addr): - """WIP, should be moved to addrthread.py or removed""" - if haveBloom: - return addr in 
self.invBloom - return None - - def addAddr(self, hashid): - """WIP, should be moved to addrthread.py or removed""" - if haveBloom: - self.addrBloom.add(hashid) diff --git a/src/tests/mock/pybitmessage/network/proxy.py b/src/tests/mock/pybitmessage/network/proxy.py deleted file mode 100644 index 3bd3cc66..00000000 --- a/src/tests/mock/pybitmessage/network/proxy.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Set proxy if avaiable otherwise exception -""" -# pylint: disable=protected-access -import logging -import socket -import time - -import asyncore_pollchoose as asyncore -from advanceddispatcher import AdvancedDispatcher -from bmconfigparser import BMConfigParser -from node import Peer - -logger = logging.getLogger('default') - - -class ProxyError(Exception): - """Base proxy exception class""" - errorCodes = ("Unknown error",) - - def __init__(self, code=-1): - self.code = code - try: - self.message = self.errorCodes[code] - except IndexError: - self.message = self.errorCodes[-1] - super(ProxyError, self).__init__(self.message) - - -class GeneralProxyError(ProxyError): - """General proxy error class (not specfic to an implementation)""" - errorCodes = ( - "Success", - "Invalid data", - "Not connected", - "Not available", - "Bad proxy type", - "Bad input", - "Timed out", - "Network unreachable", - "Connection refused", - "Host unreachable" - ) - - -class Proxy(AdvancedDispatcher): - """Base proxy class""" - # these are global, and if you change config during runtime, - # all active/new instances should change too - _proxy = ("127.0.0.1", 9050) - _auth = None - _onion_proxy = None - _onion_auth = None - _remote_dns = True - - @property - def proxy(self): - """Return proxy IP and port""" - return self.__class__._proxy - - @proxy.setter - def proxy(self, address): - """Set proxy IP and port""" - if (not isinstance(address, tuple) or len(address) < 2 - or not isinstance(address[0], str) - or not isinstance(address[1], int)): - raise ValueError - self.__class__._proxy = 
address - - @property - def auth(self): - """Return proxy authentication settings""" - return self.__class__._auth - - @auth.setter - def auth(self, authTuple): - """Set proxy authentication (username and password)""" - self.__class__._auth = authTuple - - @property - def onion_proxy(self): - """ - Return separate proxy IP and port for use only with onion - addresses. Untested. - """ - return self.__class__._onion_proxy - - @onion_proxy.setter - def onion_proxy(self, address): - """Set onion proxy address""" - if address is not None and ( - not isinstance(address, tuple) or len(address) < 2 - or not isinstance(address[0], str) - or not isinstance(address[1], int) - ): - raise ValueError - self.__class__._onion_proxy = address - - @property - def onion_auth(self): - """Return proxy authentication settings for onion hosts only""" - return self.__class__._onion_auth - - @onion_auth.setter - def onion_auth(self, authTuple): - """Set proxy authentication for onion hosts only. Untested.""" - self.__class__._onion_auth = authTuple - - def __init__(self, address): - if not isinstance(address, Peer): - raise ValueError - AdvancedDispatcher.__init__(self) - self.destination = address - self.isOutbound = True - self.fullyEstablished = False - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - if BMConfigParser().safeGetBoolean( - "bitmessagesettings", "socksauthentication"): - self.auth = ( - BMConfigParser().safeGet( - "bitmessagesettings", "socksusername"), - BMConfigParser().safeGet( - "bitmessagesettings", "sockspassword")) - else: - self.auth = None - self.connect( - self.onion_proxy - if address.host.endswith(".onion") and self.onion_proxy else - self.proxy - ) - - def handle_connect(self): - """Handle connection event (to the proxy)""" - self.set_state("init") - try: - AdvancedDispatcher.handle_connect(self) - except socket.error as e: - if e.errno in asyncore._DISCONNECTED: - logger.debug( - "%s:%i: Connection failed: %s", - self.destination.host, 
self.destination.port, e) - return - self.state_init() - - def state_proxy_handshake_done(self): - """Handshake is complete at this point""" - self.connectedAt = time.time() - return False diff --git a/src/tests/mock/pybitmessage/network/receivequeuethread.py b/src/tests/mock/pybitmessage/network/receivequeuethread.py deleted file mode 100644 index 56c01b77..00000000 --- a/src/tests/mock/pybitmessage/network/receivequeuethread.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Process data incoming from network -""" -import errno -import Queue -import socket - -import state -from network.advanceddispatcher import UnknownStateError -from network.connectionpool import BMConnectionPool -from queues import receiveDataQueue -from threads import StoppableThread - - -class ReceiveQueueThread(StoppableThread): - """This thread processes data received from the network - (which is done by the asyncore thread)""" - def __init__(self, num=0): - super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num) - - def run(self): - while not self._stopped and state.shutdown == 0: - try: - dest = receiveDataQueue.get(block=True, timeout=1) - except Queue.Empty: - continue - - if self._stopped or state.shutdown: - break - - # cycle as long as there is data - # methods should return False if there isn't enough data, - # or the connection is to be aborted - - # state_* methods should return False if there isn't - # enough data, or the connection is to be aborted - - try: - connection = BMConnectionPool().getConnectionByAddr(dest) - # connection object not found - except KeyError: - receiveDataQueue.task_done() - continue - try: - connection.process() - # state isn't implemented - except UnknownStateError: - pass - except socket.error as err: - if err.errno == errno.EBADF: - connection.set_state("close", 0) - else: - self.logger.error('Socket error: %s', err) - except: # noqa:E722 - self.logger.error('Error processing', exc_info=True) - receiveDataQueue.task_done() diff --git 
a/src/tests/mock/pybitmessage/network/socks4a.py b/src/tests/mock/pybitmessage/network/socks4a.py deleted file mode 100644 index e9786168..00000000 --- a/src/tests/mock/pybitmessage/network/socks4a.py +++ /dev/null @@ -1,147 +0,0 @@ -""" -SOCKS4a proxy module -""" -# pylint: disable=attribute-defined-outside-init -import logging -import socket -import struct - -from proxy import GeneralProxyError, Proxy, ProxyError - -logger = logging.getLogger('default') - - -class Socks4aError(ProxyError): - """SOCKS4a error base class""" - errorCodes = ( - "Request granted", - "Request rejected or failed", - "Request rejected because SOCKS server cannot connect to identd" - " on the client", - "Request rejected because the client program and identd report" - " different user-ids", - "Unknown error" - ) - - -class Socks4a(Proxy): - """SOCKS4a proxy class""" - def __init__(self, address=None): - Proxy.__init__(self, address) - self.ipaddr = None - self.destport = address[1] - - def state_init(self): - """Protocol initialisation (before connection is established)""" - self.set_state("auth_done", 0) - return True - - def state_pre_connect(self): - """Handle feedback from SOCKS4a while it is connecting on our behalf""" - # Get the response - if self.read_buf[0:1] != chr(0x00).encode(): - # bad data - self.close() - raise GeneralProxyError(1) - elif self.read_buf[1:2] != chr(0x5A).encode(): - # Connection failed - self.close() - if ord(self.read_buf[1:2]) in (91, 92, 93): - # socks 4 error - raise Socks4aError(ord(self.read_buf[1:2]) - 90) - else: - raise Socks4aError(4) - # Get the bound address/port - self.boundport = struct.unpack(">H", self.read_buf[2:4])[0] - self.boundaddr = self.read_buf[4:] - self.__proxysockname = (self.boundaddr, self.boundport) - if self.ipaddr: - self.__proxypeername = ( - socket.inet_ntoa(self.ipaddr), self.destination[1]) - else: - self.__proxypeername = (self.destination[0], self.destport) - self.set_state("proxy_handshake_done", length=8) - return True 
- - def proxy_sock_name(self): - """ - Handle return value when using SOCKS4a for DNS resolving - instead of connecting. - """ - return socket.inet_ntoa(self.__proxysockname[0]) - - -class Socks4aConnection(Socks4a): - """Child SOCKS4a class used for making outbound connections.""" - def __init__(self, address): - Socks4a.__init__(self, address=address) - - def state_auth_done(self): - """Request connection to be made""" - # Now we can request the actual connection - rmtrslv = False - self.append_write_buf( - struct.pack('>BBH', 0x04, 0x01, self.destination[1])) - # If the given destination address is an IP address, we'll - # use the IPv4 address request even if remote resolving was specified. - try: - self.ipaddr = socket.inet_aton(self.destination[0]) - self.append_write_buf(self.ipaddr) - except socket.error: - # Well it's not an IP number, so it's probably a DNS name. - if self._remote_dns: - # Resolve remotely - rmtrslv = True - self.ipaddr = None - self.append_write_buf( - struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)) - else: - # Resolve locally - self.ipaddr = socket.inet_aton( - socket.gethostbyname(self.destination[0])) - self.append_write_buf(self.ipaddr) - if self._auth: - self.append_write_buf(self._auth[0]) - self.append_write_buf(chr(0x00).encode()) - if rmtrslv: - self.append_write_buf(self.destination[0] + chr(0x00).encode()) - self.set_state("pre_connect", length=0, expectBytes=8) - return True - - def state_pre_connect(self): - """Tell SOCKS4a to initiate a connection""" - try: - return Socks4a.state_pre_connect(self) - except Socks4aError as e: - self.close_reason = e.message - self.set_state("close") - - -class Socks4aResolver(Socks4a): - """DNS resolver class using SOCKS4a""" - def __init__(self, host): - self.host = host - self.port = 8444 - Socks4a.__init__(self, address=(self.host, self.port)) - - def state_auth_done(self): - """Request connection to be made""" - # Now we can request the actual connection - self.append_write_buf( - 
struct.pack('>BBH', 0x04, 0xF0, self.destination[1])) - self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)) - if self._auth: - self.append_write_buf(self._auth[0]) - self.append_write_buf(chr(0x00).encode()) - self.append_write_buf(self.host + chr(0x00).encode()) - self.set_state("pre_connect", length=0, expectBytes=8) - return True - - def resolved(self): - """ - Resolving is done, process the return value. To use this within - PyBitmessage, a callback needs to be implemented which hasn't - been done yet. - """ - logger.debug( - 'Resolved %s as %s', self.host, self.proxy_sock_name()) diff --git a/src/tests/mock/pybitmessage/network/socks5.py b/src/tests/mock/pybitmessage/network/socks5.py deleted file mode 100644 index d1daae42..00000000 --- a/src/tests/mock/pybitmessage/network/socks5.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -SOCKS5 proxy module -""" -# pylint: disable=attribute-defined-outside-init - -import logging -import socket -import struct - -from node import Peer -from proxy import GeneralProxyError, Proxy, ProxyError - -logger = logging.getLogger('default') - - -class Socks5AuthError(ProxyError): - """Rised when the socks5 protocol encounters an authentication error""" - errorCodes = ( - "Succeeded", - "Authentication is required", - "All offered authentication methods were rejected", - "Unknown username or invalid password", - "Unknown error" - ) - - -class Socks5Error(ProxyError): - """Rised when socks5 protocol encounters an error""" - errorCodes = ( - "Succeeded", - "General SOCKS server failure", - "Connection not allowed by ruleset", - "Network unreachable", - "Host unreachable", - "Connection refused", - "TTL expired", - "Command not supported", - "Address type not supported", - "Unknown error" - ) - - -class Socks5(Proxy): - """A socks5 proxy base class""" - def __init__(self, address=None): - Proxy.__init__(self, address) - self.ipaddr = None - self.destport = address[1] - - def state_init(self): - """Protocol initialization 
(before connection is established)""" - if self._auth: - self.append_write_buf(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02)) - else: - self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00)) - self.set_state("auth_1", length=0, expectBytes=2) - return True - - def state_auth_1(self): - """Perform authentication if peer is requesting it.""" - ret = struct.unpack('BB', self.read_buf[:2]) - if ret[0] != 5: - # general error - raise GeneralProxyError(1) - elif ret[1] == 0: - # no auth required - self.set_state("auth_done", length=2) - elif ret[1] == 2: - # username/password - self.append_write_buf( - struct.pack( - 'BB', 1, len(self._auth[0])) + self._auth[0] + struct.pack( - 'B', len(self._auth[1])) + self._auth[1]) - self.set_state("auth_needed", length=2, expectBytes=2) - else: - if ret[1] == 0xff: - # auth error - raise Socks5AuthError(2) - else: - # other error - raise GeneralProxyError(1) - return True - - def state_auth_needed(self): - """Handle response to authentication attempt""" - ret = struct.unpack('BB', self.read_buf[0:2]) - if ret[0] != 1: - # general error - raise GeneralProxyError(1) - if ret[1] != 0: - # auth error - raise Socks5AuthError(3) - # all ok - self.set_state("auth_done", length=2) - return True - - def state_pre_connect(self): - """Handle feedback from socks5 while it is connecting on our behalf.""" - # Get the response - if self.read_buf[0:1] != chr(0x05).encode(): - self.close() - raise GeneralProxyError(1) - elif self.read_buf[1:2] != chr(0x00).encode(): - # Connection failed - self.close() - if ord(self.read_buf[1:2]) <= 8: - raise Socks5Error(ord(self.read_buf[1:2])) - else: - raise Socks5Error(9) - # Get the bound address/port - elif self.read_buf[3:4] == chr(0x01).encode(): - self.set_state("proxy_addr_1", length=4, expectBytes=4) - elif self.read_buf[3:4] == chr(0x03).encode(): - self.set_state("proxy_addr_2_1", length=4, expectBytes=1) - else: - self.close() - raise GeneralProxyError(1) - return True - - def 
state_proxy_addr_1(self): - """Handle IPv4 address returned for peer""" - self.boundaddr = self.read_buf[0:4] - self.set_state("proxy_port", length=4, expectBytes=2) - return True - - def state_proxy_addr_2_1(self): - """ - Handle other addresses than IPv4 returned for peer - (e.g. IPv6, onion, ...). This is part 1 which retrieves the - length of the data. - """ - self.address_length = ord(self.read_buf[0:1]) - self.set_state( - "proxy_addr_2_2", length=1, expectBytes=self.address_length) - return True - - def state_proxy_addr_2_2(self): - """ - Handle other addresses than IPv4 returned for peer - (e.g. IPv6, onion, ...). This is part 2 which retrieves the data. - """ - self.boundaddr = self.read_buf[0:self.address_length] - self.set_state("proxy_port", length=self.address_length, expectBytes=2) - return True - - def state_proxy_port(self): - """Handle peer's port being returned.""" - self.boundport = struct.unpack(">H", self.read_buf[0:2])[0] - self.__proxysockname = (self.boundaddr, self.boundport) - if self.ipaddr is not None: - self.__proxypeername = ( - socket.inet_ntoa(self.ipaddr), self.destination[1]) - else: - self.__proxypeername = (self.destination[0], self.destport) - self.set_state("proxy_handshake_done", length=2) - return True - - def proxy_sock_name(self): - """Handle return value when using SOCKS5 - for DNS resolving instead of connecting.""" - return socket.inet_ntoa(self.__proxysockname[0]) - - -class Socks5Connection(Socks5): - """Child socks5 class used for making outbound connections.""" - def state_auth_done(self): - """Request connection to be made""" - # Now we can request the actual connection - self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00)) - # If the given destination address is an IP address, we'll - # use the IPv4 address request even if remote resolving was specified. - try: - self.ipaddr = socket.inet_aton(self.destination[0]) - self.append_write_buf(chr(0x01).encode() + self.ipaddr) - except socket.error: # may be IPv6! 
- # Well it's not an IP number, so it's probably a DNS name. - if self._remote_dns: - # Resolve remotely - self.ipaddr = None - self.append_write_buf(chr(0x03).encode() + chr( - len(self.destination[0])).encode() + self.destination[0]) - else: - # Resolve locally - self.ipaddr = socket.inet_aton( - socket.gethostbyname(self.destination[0])) - self.append_write_buf(chr(0x01).encode() + self.ipaddr) - self.append_write_buf(struct.pack(">H", self.destination[1])) - self.set_state("pre_connect", length=0, expectBytes=4) - return True - - def state_pre_connect(self): - """Tell socks5 to initiate a connection""" - try: - return Socks5.state_pre_connect(self) - except Socks5Error as e: - self.close_reason = e.message - self.set_state("close") - - -class Socks5Resolver(Socks5): - """DNS resolver class using socks5""" - def __init__(self, host): - self.host = host - self.port = 8444 - Socks5.__init__(self, address=Peer(self.host, self.port)) - - def state_auth_done(self): - """Perform resolving""" - # Now we can request the actual connection - self.append_write_buf(struct.pack('BBB', 0x05, 0xF0, 0x00)) - self.append_write_buf(chr(0x03).encode() + chr( - len(self.host)).encode() + str(self.host)) - self.append_write_buf(struct.pack(">H", self.port)) - self.set_state("pre_connect", length=0, expectBytes=4) - return True - - def resolved(self): - """ - Resolving is done, process the return value. - To use this within PyBitmessage, a callback needs to be - implemented which hasn't been done yet. 
- """ - logger.debug( - 'Resolved %s as %s', self.host, self.proxy_sock_name()) diff --git a/src/tests/mock/pybitmessage/network/stats.py b/src/tests/mock/pybitmessage/network/stats.py deleted file mode 100644 index 82e6c87f..00000000 --- a/src/tests/mock/pybitmessage/network/stats.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Network statistics -""" -import time - -import asyncore_pollchoose as asyncore -from network.connectionpool import BMConnectionPool -from objectracker import missingObjects - - -lastReceivedTimestamp = time.time() -lastReceivedBytes = 0 -currentReceivedSpeed = 0 -lastSentTimestamp = time.time() -lastSentBytes = 0 -currentSentSpeed = 0 - - -def connectedHostsList(): - """List of all the connected hosts""" - return BMConnectionPool().establishedConnections() - - -def sentBytes(): - """Sending Bytes""" - return asyncore.sentBytes - - -def uploadSpeed(): - """Getting upload speed""" - # pylint: disable=global-statement - global lastSentTimestamp, lastSentBytes, currentSentSpeed - currentTimestamp = time.time() - if int(lastSentTimestamp) < int(currentTimestamp): - currentSentBytes = asyncore.sentBytes - currentSentSpeed = int( - (currentSentBytes - lastSentBytes) / ( - currentTimestamp - lastSentTimestamp)) - lastSentBytes = currentSentBytes - lastSentTimestamp = currentTimestamp - return currentSentSpeed - - -def receivedBytes(): - """Receiving Bytes""" - return asyncore.receivedBytes - - -def downloadSpeed(): - """Getting download speed""" - # pylint: disable=global-statement - global lastReceivedTimestamp, lastReceivedBytes, currentReceivedSpeed - currentTimestamp = time.time() - if int(lastReceivedTimestamp) < int(currentTimestamp): - currentReceivedBytes = asyncore.receivedBytes - currentReceivedSpeed = int( - (currentReceivedBytes - lastReceivedBytes) / ( - currentTimestamp - lastReceivedTimestamp)) - lastReceivedBytes = currentReceivedBytes - lastReceivedTimestamp = currentTimestamp - return currentReceivedSpeed - - -def pendingDownload(): - 
"""Getting pending downloads""" - return len(missingObjects) - - -def pendingUpload(): - """Getting pending uploads""" - # tmp = {} - # for connection in BMConnectionPool().inboundConnections.values() + \ - # BMConnectionPool().outboundConnections.values(): - # for k in connection.objectsNewToThem.keys(): - # tmp[k] = True - # This probably isn't the correct logic so it's disabled - # return len(tmp) - return 0 diff --git a/src/tests/mock/pybitmessage/network/tcp.py b/src/tests/mock/pybitmessage/network/tcp.py deleted file mode 100644 index ff778378..00000000 --- a/src/tests/mock/pybitmessage/network/tcp.py +++ /dev/null @@ -1,448 +0,0 @@ -""" -TCP protocol handler -""" -# pylint: disable=too-many-ancestors -import l10n -import logging -import math -import random -import socket -import time - -import addresses -import asyncore_pollchoose as asyncore -import connectionpool -import helper_random -import knownnodes -import protocol -import state -from bmconfigparser import BMConfigParser -from helper_random import randomBytes -from inventory import Inventory -from network.advanceddispatcher import AdvancedDispatcher -from network.assemble import assemble_addr -from network.bmproto import BMProto -from network.constants import MAX_OBJECT_COUNT -from network.dandelion import Dandelion -from network.objectracker import ObjectTracker -from network.socks4a import Socks4aConnection -from network.socks5 import Socks5Connection -from network.tls import TLSDispatcher -from node import Peer -from queues import invQueue, receiveDataQueue, UISignalQueue -from tr import _translate - -logger = logging.getLogger('default') - - -maximumAgeOfNodesThatIAdvertiseToOthers = 10800 #: Equals three hours -maximumTimeOffsetWrongCount = 3 #: Connections with wrong time offset - - -class TCPConnection(BMProto, TLSDispatcher): - # pylint: disable=too-many-instance-attributes - """ - .. 
todo:: Look to understand and/or fix the non-parent-init-called - """ - - def __init__(self, address=None, sock=None): - BMProto.__init__(self, address=address, sock=sock) - self.verackReceived = False - self.verackSent = False - self.streams = [0] - self.fullyEstablished = False - self.skipUntil = 0 - if address is None and sock is not None: - self.destination = Peer(*sock.getpeername()) - self.isOutbound = False - TLSDispatcher.__init__(self, sock, server_side=True) - self.connectedAt = time.time() - logger.debug( - 'Received connection from %s:%i', - self.destination.host, self.destination.port) - self.nodeid = randomBytes(8) - elif address is not None and sock is not None: - TLSDispatcher.__init__(self, sock, server_side=False) - self.isOutbound = True - logger.debug( - 'Outbound proxy connection to %s:%i', - self.destination.host, self.destination.port) - else: - self.destination = address - self.isOutbound = True - self.create_socket( - socket.AF_INET6 if ":" in address.host else socket.AF_INET, - socket.SOCK_STREAM) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - TLSDispatcher.__init__(self, sock, server_side=False) - self.connect(self.destination) - logger.debug( - 'Connecting to %s:%i', - self.destination.host, self.destination.port) - try: - self.local = ( - protocol.checkIPAddress( - protocol.encodeHost(self.destination.host), True) - and not protocol.checkSocksIP(self.destination.host) - ) - except socket.error: - # it's probably a hostname - pass - self.network_group = protocol.network_group(self.destination.host) - ObjectTracker.__init__(self) # pylint: disable=non-parent-init-called - self.bm_proto_reset() - self.set_state("bm_header", expectBytes=protocol.Header.size) - - def antiIntersectionDelay(self, initial=False): - """ - This is a defense against the so called intersection attacks. - - It is called when you notice peer is requesting non-existing - objects, or right after the connection is established. 
It will - estimate how long an object will take to propagate across the - network, and skip processing "getdata" requests until then. This - means an attacker only has one shot per IP to perform the attack. - """ - # estimated time for a small object to propagate across the - # whole network - max_known_nodes = max( - len(knownnodes.knownNodes[x]) for x in knownnodes.knownNodes) - delay = math.ceil(math.log(max_known_nodes + 2, 20)) * ( - 0.2 + invQueue.queueCount / 2.0) - # take the stream with maximum amount of nodes - # +2 is to avoid problems with log(0) and log(1) - # 20 is avg connected nodes count - # 0.2 is avg message transmission time - if delay > 0: - if initial: - self.skipUntil = self.connectedAt + delay - if self.skipUntil > time.time(): - logger.debug( - 'Initial skipping processing getdata for %.2fs', - self.skipUntil - time.time()) - else: - logger.debug( - 'Skipping processing getdata due to missing object' - ' for %.2fs', delay) - self.skipUntil = time.time() + delay - - def checkTimeOffsetNotification(self): - """ - Check if we have connected to too many nodes which have too high - time offset from us - """ - if BMProto.timeOffsetWrongCount > \ - maximumTimeOffsetWrongCount and \ - not self.fullyEstablished: - UISignalQueue.put(( - 'updateStatusBar', - _translate( - "MainWindow", - "The time on your computer, %1, may be wrong. " - "Please verify your settings." - ).arg(l10n.formatTimestamp()))) - - def state_connection_fully_established(self): - """ - State after the bitmessage protocol handshake is completed - (version/verack exchange, and if both side support TLS, - the TLS handshake as well). 
- """ - self.set_connection_fully_established() - self.set_state("bm_header") - self.bm_proto_reset() - return True - - def set_connection_fully_established(self): - """Initiate inventory synchronisation.""" - if not self.isOutbound and not self.local: - state.clientHasReceivedIncomingConnections = True - UISignalQueue.put(('setStatusIcon', 'green')) - UISignalQueue.put(( - 'updateNetworkStatusTab', (self.isOutbound, True, self.destination) - )) - self.antiIntersectionDelay(True) - self.fullyEstablished = True - # The connection having host suitable for knownnodes - if self.isOutbound or not self.local and not state.socksIP: - knownnodes.increaseRating(self.destination) - knownnodes.addKnownNode( - self.streams, self.destination, time.time()) - Dandelion().maybeAddStem(self) - self.sendAddr() - self.sendBigInv() - - def sendAddr(self): - """Send a partial list of known addresses to peer.""" - # We are going to share a maximum number of 1000 addrs (per overlapping - # stream) with our peer. 500 from overlapping streams, 250 from the - # left child stream, and 250 from the right child stream. 
- maxAddrCount = BMConfigParser().safeGetInt( - "bitmessagesettings", "maxaddrperstreamsend", 500) - - templist = [] - addrs = {} - for stream in self.streams: - with knownnodes.knownNodesLock: - for n, s in enumerate((stream, stream * 2, stream * 2 + 1)): - nodes = knownnodes.knownNodes.get(s) - if not nodes: - continue - # only if more recent than 3 hours - # and having positive or neutral rating - filtered = [ - (k, v) for k, v in nodes.iteritems() - if v["lastseen"] > int(time.time()) - - maximumAgeOfNodesThatIAdvertiseToOthers - and v["rating"] >= 0 and len(k.host) <= 22 - ] - # sent 250 only if the remote isn't interested in it - elemCount = min( - len(filtered), - maxAddrCount / 2 if n else maxAddrCount) - addrs[s] = helper_random.randomsample(filtered, elemCount) - for substream in addrs: - for peer, params in addrs[substream]: - templist.append((substream, peer, params["lastseen"])) - if templist: - self.append_write_buf(assemble_addr(templist)) - - def sendBigInv(self): - """ - Send hashes of all inventory objects, chunked as the protocol has - a per-command limit. - """ - def sendChunk(): - """Send one chunk of inv entries in one command""" - if objectCount == 0: - return - logger.debug( - 'Sending huge inv message with %i objects to just this' - ' one peer', objectCount) - self.append_write_buf(protocol.CreatePacket( - 'inv', addresses.encodeVarint(objectCount) + payload)) - - # Select all hashes for objects in this stream. - bigInvList = {} - for stream in self.streams: - # may lock for a long time, but I think it's better than - # thousands of small locks - with self.objectsNewToThemLock: - for objHash in Inventory().unexpired_hashes_by_stream(stream): - # don't advertise stem objects on bigInv - if Dandelion().hasHash(objHash): - continue - bigInvList[objHash] = 0 - objectCount = 0 - payload = b'' - # Now let us start appending all of these hashes together. - # They will be sent out in a big inv message to our new peer. 
- for obj_hash, _ in bigInvList.items(): - payload += obj_hash - objectCount += 1 - - # Remove -1 below when sufficient time has passed for users to - # upgrade to versions of PyBitmessage that accept inv with 50,000 - # items - if objectCount >= MAX_OBJECT_COUNT - 1: - sendChunk() - payload = b'' - objectCount = 0 - - # flush - sendChunk() - - def handle_connect(self): - """Callback for TCP connection being established.""" - try: - AdvancedDispatcher.handle_connect(self) - except socket.error as e: - # pylint: disable=protected-access - if e.errno in asyncore._DISCONNECTED: - logger.debug( - '%s:%i: Connection failed: %s', - self.destination.host, self.destination.port, e) - return - self.nodeid = randomBytes(8) - self.append_write_buf( - protocol.assembleVersionMessage( - self.destination.host, self.destination.port, - connectionpool.BMConnectionPool().streams, - False, nodeid=self.nodeid)) - self.connectedAt = time.time() - receiveDataQueue.put(self.destination) - - def handle_read(self): - """Callback for reading from a socket""" - TLSDispatcher.handle_read(self) - receiveDataQueue.put(self.destination) - - def handle_write(self): - """Callback for writing to a socket""" - TLSDispatcher.handle_write(self) - - def handle_close(self): - """Callback for connection being closed.""" - host_is_global = self.isOutbound or not self.local and not state.socksIP - if self.fullyEstablished: - UISignalQueue.put(( - 'updateNetworkStatusTab', - (self.isOutbound, False, self.destination) - )) - if host_is_global: - knownnodes.addKnownNode( - self.streams, self.destination, time.time()) - Dandelion().maybeRemoveStem(self) - else: - self.checkTimeOffsetNotification() - if host_is_global: - knownnodes.decreaseRating(self.destination) - BMProto.handle_close(self) - - -class Socks5BMConnection(Socks5Connection, TCPConnection): - """SOCKS5 wrapper for TCP connections""" - - def __init__(self, address): - Socks5Connection.__init__(self, address=address) - TCPConnection.__init__(self, 
address=address, sock=self.socket) - self.set_state("init") - - def state_proxy_handshake_done(self): - """ - State when SOCKS5 connection succeeds, we need to send a - Bitmessage handshake to peer. - """ - Socks5Connection.state_proxy_handshake_done(self) - self.nodeid = randomBytes(8) - self.append_write_buf( - protocol.assembleVersionMessage( - self.destination.host, self.destination.port, - connectionpool.BMConnectionPool().streams, - False, nodeid=self.nodeid)) - self.set_state("bm_header", expectBytes=protocol.Header.size) - return True - - -class Socks4aBMConnection(Socks4aConnection, TCPConnection): - """SOCKS4a wrapper for TCP connections""" - - def __init__(self, address): - Socks4aConnection.__init__(self, address=address) - TCPConnection.__init__(self, address=address, sock=self.socket) - self.set_state("init") - - def state_proxy_handshake_done(self): - """ - State when SOCKS4a connection succeeds, we need to send a - Bitmessage handshake to peer. - """ - Socks4aConnection.state_proxy_handshake_done(self) - self.nodeid = randomBytes(8) - self.append_write_buf( - protocol.assembleVersionMessage( - self.destination.host, self.destination.port, - connectionpool.BMConnectionPool().streams, - False, nodeid=self.nodeid)) - self.set_state("bm_header", expectBytes=protocol.Header.size) - return True - - -def bootstrap(connection_class): - """Make bootstrapper class for connection type (connection_class)""" - class Bootstrapper(connection_class): - """Base class for bootstrappers""" - _connection_base = connection_class - - def __init__(self, host, port): - self._connection_base.__init__(self, Peer(host, port)) - self.close_reason = self._succeed = False - - def bm_command_addr(self): - """ - Got addr message - the bootstrap succeed. - Let BMProto process the addr message and switch state to 'close' - """ - BMProto.bm_command_addr(self) - self._succeed = True - self.close_reason = "Thanks for bootstrapping!" 
- self.set_state("close") - - def set_connection_fully_established(self): - """Only send addr here""" - # pylint: disable=attribute-defined-outside-init - self.fullyEstablished = True - self.sendAddr() - - def handle_close(self): - """ - After closing the connection switch knownnodes.knownNodesActual - back to False if the bootstrapper failed. - """ - BMProto.handle_close(self) - if not self._succeed: - knownnodes.knownNodesActual = False - - return Bootstrapper - - -class TCPServer(AdvancedDispatcher): - """TCP connection server for Bitmessage protocol""" - - def __init__(self, host='127.0.0.1', port=8444): - if not hasattr(self, '_map'): - AdvancedDispatcher.__init__(self) - self.create_socket(socket.AF_INET, socket.SOCK_STREAM) - self.set_reuse_addr() - for attempt in range(50): - try: - if attempt > 0: - logger.warning('Failed to bind on port %s', port) - port = random.randint(32767, 65535) - self.bind((host, port)) - except socket.error as e: - if e.errno in (asyncore.EADDRINUSE, asyncore.WSAEADDRINUSE): - continue - else: - if attempt > 0: - logger.warning('Setting port to %s', port) - BMConfigParser().set( - 'bitmessagesettings', 'port', str(port)) - BMConfigParser().save() - break - self.destination = Peer(host, port) - self.bound = True - self.listen(5) - - def is_bound(self): - """Is the socket bound?""" - try: - return self.bound - except AttributeError: - return False - - def handle_accept(self): - """Incoming connection callback""" - try: - sock = self.accept()[0] - except (TypeError, IndexError): - return - - state.ownAddresses[Peer(*sock.getsockname())] = True - if ( - len(connectionpool.BMConnectionPool()) - > BMConfigParser().safeGetInt( - 'bitmessagesettings', 'maxtotalconnections') - + BMConfigParser().safeGetInt( - 'bitmessagesettings', 'maxbootstrapconnections') + 10 - ): - # 10 is a sort of buffer, in between it will go through - # the version handshake and return an error to the peer - logger.warning("Server full, dropping connection") - 
sock.close() - return - try: - connectionpool.BMConnectionPool().addConnection( - TCPConnection(sock=sock)) - except socket.error: - pass diff --git a/src/tests/mock/pybitmessage/network/threads.py b/src/tests/mock/pybitmessage/network/threads.py deleted file mode 100644 index 9bdaa85d..00000000 --- a/src/tests/mock/pybitmessage/network/threads.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Threading primitives for the network package""" - -import logging -import random -import threading -from contextlib import contextmanager - - -class StoppableThread(threading.Thread): - """Base class for application threads with stopThread method""" - name = None - logger = logging.getLogger('default') - - def __init__(self, name=None): - if name: - self.name = name - super(StoppableThread, self).__init__(name=self.name) - self.stop = threading.Event() - self._stopped = False - random.seed() - self.logger.info('Init thread %s', self.name) - - def stopThread(self): - """Stop the thread""" - self._stopped = True - self.stop.set() - - -class BusyError(threading.ThreadError): - """ - Thread error raised when another connection holds the lock - we are trying to acquire. - """ - pass - - -@contextmanager -def nonBlocking(lock): - """ - A context manager which acquires given lock non-blocking - and raises BusyError if failed to acquire. - """ - locked = lock.acquire(False) - if not locked: - raise BusyError - try: - yield - finally: - lock.release() diff --git a/src/tests/mock/pybitmessage/network/tls.py b/src/tests/mock/pybitmessage/network/tls.py deleted file mode 100644 index a3774b44..00000000 --- a/src/tests/mock/pybitmessage/network/tls.py +++ /dev/null @@ -1,220 +0,0 @@ -""" -SSL/TLS negotiation. 
-""" -import logging -import os -import socket -import ssl -import sys - -import network.asyncore_pollchoose as asyncore -import paths -from network.advanceddispatcher import AdvancedDispatcher -from queues import receiveDataQueue - -logger = logging.getLogger('default') - -_DISCONNECTED_SSL = frozenset((ssl.SSL_ERROR_EOF,)) - -if sys.version_info >= (2, 7, 13): - # this means TLSv1 or higher - # in the future change to - # ssl.PROTOCOL_TLS1.2 - sslProtocolVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member -elif sys.version_info >= (2, 7, 9): - # this means any SSL/TLS. - # SSLv2 and 3 are excluded with an option after context is created - sslProtocolVersion = ssl.PROTOCOL_SSLv23 -else: - # this means TLSv1, there is no way to set "TLSv1 or higher" - # or "TLSv1.2" in < 2.7.9 - sslProtocolVersion = ssl.PROTOCOL_TLSv1 - - -# ciphers -if ( - ssl.OPENSSL_VERSION_NUMBER >= 0x10100000 - and not ssl.OPENSSL_VERSION.startswith(b"LibreSSL") -): - sslProtocolCiphers = "AECDH-AES256-SHA@SECLEVEL=0" -else: - sslProtocolCiphers = "AECDH-AES256-SHA" - - -class TLSDispatcher(AdvancedDispatcher): - """TLS functionality for classes derived from AdvancedDispatcher""" - # pylint: disable=too-many-instance-attributes, too-many-arguments - # pylint: disable=super-init-not-called - def __init__(self, _=None, sock=None, certfile=None, keyfile=None, - server_side=False, ciphers=sslProtocolCiphers): - self.want_read = self.want_write = True - self.certfile = certfile or os.path.join( - paths.codePath(), 'sslkeys', 'cert.pem') - self.keyfile = keyfile or os.path.join( - paths.codePath(), 'sslkeys', 'key.pem') - self.server_side = server_side - self.ciphers = ciphers - self.tlsStarted = False - self.tlsDone = False - self.tlsVersion = "N/A" - self.isSSL = False - - def state_tls_init(self): - """Prepare sockets for TLS handshake""" - self.isSSL = True - self.tlsStarted = True - # Once the connection has been established, - # it's safe to wrap the socket. 
- if sys.version_info >= (2, 7, 9): - context = ssl.create_default_context( - purpose=ssl.Purpose.SERVER_AUTH - if self.server_side else ssl.Purpose.CLIENT_AUTH) - context.set_ciphers(self.ciphers) - context.set_ecdh_curve("secp256k1") - context.check_hostname = False - context.verify_mode = ssl.CERT_NONE - # also exclude TLSv1 and TLSv1.1 in the future - context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 |\ - ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE |\ - ssl.OP_CIPHER_SERVER_PREFERENCE - self.sslSocket = context.wrap_socket( - self.socket, server_side=self.server_side, - do_handshake_on_connect=False) - else: - self.sslSocket = ssl.wrap_socket( - self.socket, server_side=self.server_side, - ssl_version=sslProtocolVersion, - certfile=self.certfile, keyfile=self.keyfile, - ciphers=self.ciphers, do_handshake_on_connect=False) - self.sslSocket.setblocking(0) - self.want_read = self.want_write = True - self.set_state("tls_handshake") - return False - - @staticmethod - def state_tls_handshake(): - """ - Do nothing while TLS handshake is pending, as during this phase - we need to react to callbacks instead - """ - return False - - def writable(self): - """Handle writable checks for TLS-enabled sockets""" - try: - if self.tlsStarted and not self.tlsDone and not self.write_buf: - return self.want_write - except AttributeError: - pass - return AdvancedDispatcher.writable(self) - - def readable(self): - """Handle readable check for TLS-enabled sockets""" - try: - # during TLS handshake, and after flushing write buffer, - # return status of last handshake attempt - if self.tlsStarted and not self.tlsDone and not self.write_buf: - logger.debug('tls readable, %r', self.want_read) - return self.want_read - # prior to TLS handshake, - # receiveDataThread should emulate synchronous behaviour - if not self.fullyEstablished and ( - self.expectBytes == 0 or not self.write_buf_empty()): - return False - except AttributeError: - pass - return AdvancedDispatcher.readable(self) - - def 
handle_read(self): - """ - Handle reads for sockets during TLS handshake. Requires special - treatment as during the handshake, buffers must remain empty - and normal reads must be ignored. - """ - try: - # wait for write buffer flush - if self.tlsStarted and not self.tlsDone and not self.write_buf: - self.tls_handshake() - else: - AdvancedDispatcher.handle_read(self) - except AttributeError: - AdvancedDispatcher.handle_read(self) - except ssl.SSLError as err: - if err.errno == ssl.SSL_ERROR_WANT_READ: - return - if err.errno not in _DISCONNECTED_SSL: - logger.info("SSL Error: %s", err) - self.close_reason = "SSL Error in handle_read" - self.handle_close() - - def handle_write(self): - """ - Handle writes for sockets during TLS handshake. Requires special - treatment as during the handshake, buffers must remain empty - and normal writes must be ignored. - """ - try: - # wait for write buffer flush - if self.tlsStarted and not self.tlsDone and not self.write_buf: - self.tls_handshake() - else: - AdvancedDispatcher.handle_write(self) - except AttributeError: - AdvancedDispatcher.handle_write(self) - except ssl.SSLError as err: - if err.errno == ssl.SSL_ERROR_WANT_WRITE: - return - if err.errno not in _DISCONNECTED_SSL: - logger.info("SSL Error: %s", err) - self.close_reason = "SSL Error in handle_write" - self.handle_close() - - def tls_handshake(self): - """Perform TLS handshake and handle its stages""" - # wait for flush - if self.write_buf: - return False - # Perform the handshake. 
- try: - logger.debug("handshaking (internal)") - self.sslSocket.do_handshake() - except ssl.SSLError as err: - self.close_reason = "SSL Error in tls_handshake" - logger.info("%s:%i: handshake fail", *self.destination) - self.want_read = self.want_write = False - if err.args[0] == ssl.SSL_ERROR_WANT_READ: - logger.debug("want read") - self.want_read = True - if err.args[0] == ssl.SSL_ERROR_WANT_WRITE: - logger.debug("want write") - self.want_write = True - if not (self.want_write or self.want_read): - raise - except socket.error as err: - # pylint: disable=protected-access - if err.errno in asyncore._DISCONNECTED: - self.close_reason = "socket.error in tls_handshake" - self.handle_close() - else: - raise - else: - if sys.version_info >= (2, 7, 9): - self.tlsVersion = self.sslSocket.version() - logger.debug( - '%s:%i: TLS handshake success, TLS protocol version: %s', - self.destination.host, self.destination.port, - self.tlsVersion) - else: - self.tlsVersion = "TLSv1" - logger.debug( - '%s:%i: TLS handshake success', - self.destination.host, self.destination.port) - # The handshake has completed, so remove this channel and... 
- self.del_channel() - self.set_socket(self.sslSocket) - self.tlsDone = True - - self.bm_proto_reset() - self.set_state("connection_fully_established") - receiveDataQueue.put(self.destination) - return False diff --git a/src/tests/mock/pybitmessage/network/udp.py b/src/tests/mock/pybitmessage/network/udp.py deleted file mode 100644 index 3f999332..00000000 --- a/src/tests/mock/pybitmessage/network/udp.py +++ /dev/null @@ -1,147 +0,0 @@ -""" -UDP protocol handler -""" -import logging -import socket -import time - -import protocol -import state -from bmproto import BMProto -from constants import MAX_TIME_OFFSET -from node import Peer -from objectracker import ObjectTracker -from queues import receiveDataQueue - -logger = logging.getLogger('default') - - -class UDPSocket(BMProto): # pylint: disable=too-many-instance-attributes - """Bitmessage protocol over UDP (class)""" - port = 8444 - - def __init__(self, host=None, sock=None, announcing=False): - # pylint: disable=bad-super-call - super(BMProto, self).__init__(sock=sock) - self.verackReceived = True - self.verackSent = True - # .. 
todo:: sort out streams - self.streams = [1] - self.fullyEstablished = True - self.skipUntil = 0 - if sock is None: - if host is None: - host = '' - self.create_socket( - socket.AF_INET6 if ":" in host else socket.AF_INET, - socket.SOCK_DGRAM - ) - self.set_socket_reuse() - logger.info("Binding UDP socket to %s:%i", host, self.port) - self.socket.bind((host, self.port)) - else: - self.socket = sock - self.set_socket_reuse() - self.listening = Peer(*self.socket.getsockname()) - self.destination = Peer(*self.socket.getsockname()) - ObjectTracker.__init__(self) - self.connecting = False - self.connected = True - self.announcing = announcing - self.set_state("bm_header", expectBytes=protocol.Header.size) - - def set_socket_reuse(self): - """Set socket reuse option""" - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - try: - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - except AttributeError: - pass - - # disable most commands before doing research / testing - # only addr (peer discovery), error and object are implemented - - def bm_command_getdata(self): - # return BMProto.bm_command_getdata(self) - return True - - def bm_command_inv(self): - # return BMProto.bm_command_inv(self) - return True - - def bm_command_addr(self): - addresses = self._decode_addr() - # only allow peer discovery from private IPs in order to avoid - # attacks from random IPs on the internet - if not self.local: - return True - remoteport = False - for seenTime, stream, _, ip, port in addresses: - decodedIP = protocol.checkIPAddress(str(ip)) - if stream not in state.streamsInWhichIAmParticipating: - continue - if (seenTime < time.time() - MAX_TIME_OFFSET - or seenTime > time.time() + MAX_TIME_OFFSET): - continue - if decodedIP is False: - # if the address isn't local, interpret it as - # the host's own announcement - remoteport = port - if remoteport is False: - return True - logger.debug( 
- "received peer discovery from %s:%i (port %i):", - self.destination.host, self.destination.port, remoteport) - state.discoveredPeers[Peer(self.destination.host, remoteport)] = \ - time.time() - return True - - def bm_command_portcheck(self): - return True - - def bm_command_ping(self): - return True - - def bm_command_pong(self): - return True - - def bm_command_verack(self): - return True - - def bm_command_version(self): - return True - - def handle_connect(self): - return - - def writable(self): - return self.write_buf - - def readable(self): - return len(self.read_buf) < self._buf_len - - def handle_read(self): - try: - recdata, addr = self.socket.recvfrom(self._buf_len) - except socket.error: - logger.error("socket error on recvfrom:", exc_info=True) - return - - self.destination = Peer(*addr) - encodedAddr = protocol.encodeHost(addr[0]) - self.local = bool(protocol.checkIPAddress(encodedAddr, True)) - # overwrite the old buffer to avoid mixing data and so that - # self.local works correctly - self.read_buf[0:] = recdata - self.bm_proto_reset() - receiveDataQueue.put(self.listening) - - def handle_write(self): - try: - retval = self.socket.sendto( - self.write_buf, ('', self.port)) - except socket.error: - logger.error("socket error on sendto:", exc_info=True) - retval = len(self.write_buf) - self.slice_write_buf(retval) diff --git a/src/tests/mock/pybitmessage/network/uploadthread.py b/src/tests/mock/pybitmessage/network/uploadthread.py deleted file mode 100644 index 7d80d789..00000000 --- a/src/tests/mock/pybitmessage/network/uploadthread.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -`UploadThread` class definition -""" -import time - -import helper_random -import protocol -from inventory import Inventory -from network.connectionpool import BMConnectionPool -from network.dandelion import Dandelion -from randomtrackingdict import RandomTrackingDict -from threads import StoppableThread - - -class UploadThread(StoppableThread): - """ - This is a thread that uploads 
the objects that the peers requested from me - """ - maxBufSize = 2097152 # 2MB - name = "Uploader" - - def run(self): - while not self._stopped: - uploaded = 0 - # Choose uploading peers randomly - connections = BMConnectionPool().establishedConnections() - helper_random.randomshuffle(connections) - for i in connections: - now = time.time() - # avoid unnecessary delay - if i.skipUntil >= now: - continue - if len(i.write_buf) > self.maxBufSize: - continue - try: - request = i.pendingUpload.randomKeys( - RandomTrackingDict.maxPending) - except KeyError: - continue - payload = bytearray() - chunk_count = 0 - for chunk in request: - del i.pendingUpload[chunk] - if Dandelion().hasHash(chunk) and \ - i != Dandelion().objectChildStem(chunk): - i.antiIntersectionDelay() - self.logger.info( - '%s asked for a stem object we didn\'t offer to it.', - i.destination) - break - try: - payload.extend(protocol.CreatePacket( - 'object', Inventory()[chunk].payload)) - chunk_count += 1 - except KeyError: - i.antiIntersectionDelay() - self.logger.info( - '%s asked for an object we don\'t have.', - i.destination) - break - if not chunk_count: - continue - i.append_write_buf(payload) - self.logger.debug( - '%s:%i Uploading %i objects', - i.destination.host, i.destination.port, chunk_count) - uploaded += chunk_count - if not uploaded: - self.stop.wait(1) diff --git a/src/tests/mock/pybitmessage/openclpow.py b/src/tests/mock/pybitmessage/openclpow.py deleted file mode 100644 index 1091f555..00000000 --- a/src/tests/mock/pybitmessage/openclpow.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -Module for Proof of Work using OpenCL -""" -import logging -import os -from struct import pack - -import paths -from bmconfigparser import BMConfigParser -from state import shutdown - -try: - import numpy - import pyopencl as cl - libAvailable = True -except ImportError: - libAvailable = False - - -logger = logging.getLogger('default') - -ctx = False -queue = False -program = False -gpus = [] -enabledGpus = [] 
-vendors = [] -hash_dt = None - - -def initCL(): - """Initlialise OpenCL engine""" - global ctx, queue, program, hash_dt # pylint: disable=global-statement - if libAvailable is False: - return - del enabledGpus[:] - del vendors[:] - del gpus[:] - ctx = False - try: - hash_dt = numpy.dtype([('target', numpy.uint64), ('v', numpy.str_, 73)]) - try: - for platform in cl.get_platforms(): - gpus.extend(platform.get_devices(device_type=cl.device_type.GPU)) - if BMConfigParser().safeGet("bitmessagesettings", "opencl") == platform.vendor: - enabledGpus.extend(platform.get_devices( - device_type=cl.device_type.GPU)) - if platform.vendor not in vendors: - vendors.append(platform.vendor) - except: # noqa:E722 - pass - if enabledGpus: - ctx = cl.Context(devices=enabledGpus) - queue = cl.CommandQueue(ctx) - f = open(os.path.join(paths.codePath(), "bitmsghash", 'bitmsghash.cl'), 'r') - fstr = ''.join(f.readlines()) - program = cl.Program(ctx, fstr).build(options="") - logger.info("Loaded OpenCL kernel") - else: - logger.info("No OpenCL GPUs found") - del enabledGpus[:] - except Exception: - logger.error("OpenCL fail: ", exc_info=True) - del enabledGpus[:] - - -def openclAvailable(): - """Are there any OpenCL GPUs available?""" - return bool(gpus) - - -def openclEnabled(): - """Is OpenCL enabled (and available)?""" - return bool(enabledGpus) - - -def do_opencl_pow(hash_, target): - """Perform PoW using OpenCL""" - output = numpy.zeros(1, dtype=[('v', numpy.uint64, 1)]) - if not enabledGpus: - return output[0][0] - - data = numpy.zeros(1, dtype=hash_dt, order='C') - data[0]['v'] = ("0000000000000000" + hash_).decode("hex") - data[0]['target'] = target - - hash_buf = cl.Buffer(ctx, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=data) - dest_buf = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, output.nbytes) - - kernel = program.kernel_sha512 - worksize = kernel.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, enabledGpus[0]) - - kernel.set_arg(0, hash_buf) - 
kernel.set_arg(1, dest_buf) - - progress = 0 - globamt = worksize * 2000 - - while output[0][0] == 0 and shutdown == 0: - kernel.set_arg(2, pack(" -# See LICENSE for details. -# -# Software slightly changed by Jonathan Warren -""" -This module loads openssl libs with ctypes and incapsulates -needed openssl functionality in class _OpenSSL. -""" -import ctypes -import sys - -# pylint: disable=protected-access - -OpenSSL = None - - -class CipherName(object): - """Class returns cipher name, pointer and blocksize""" - - def __init__(self, name, pointer, blocksize): - self._name = name - self._pointer = pointer - self._blocksize = blocksize - - def __str__(self): - return "Cipher : " + self._name + \ - " | Blocksize : " + str(self._blocksize) + \ - " | Function pointer : " + str(self._pointer) - - def get_pointer(self): - """This method returns cipher pointer""" - return self._pointer() - - def get_name(self): - """This method returns cipher name""" - return self._name - - def get_blocksize(self): - """This method returns cipher blocksize""" - return self._blocksize - - -def get_version(library): - """This function return version, hexversion and cflages""" - version = None - hexversion = None - cflags = None - try: - # OpenSSL 1.1 - OPENSSL_VERSION = 0 - OPENSSL_CFLAGS = 1 - library.OpenSSL_version.argtypes = [ctypes.c_int] - library.OpenSSL_version.restype = ctypes.c_char_p - version = library.OpenSSL_version(OPENSSL_VERSION) - cflags = library.OpenSSL_version(OPENSSL_CFLAGS) - library.OpenSSL_version_num.restype = ctypes.c_long - hexversion = library.OpenSSL_version_num() - except AttributeError: - try: - # OpenSSL 1.0 - SSLEAY_VERSION = 0 - SSLEAY_CFLAGS = 2 - library.SSLeay.restype = ctypes.c_long - library.SSLeay_version.restype = ctypes.c_char_p - library.SSLeay_version.argtypes = [ctypes.c_int] - version = library.SSLeay_version(SSLEAY_VERSION) - cflags = library.SSLeay_version(SSLEAY_CFLAGS) - hexversion = library.SSLeay() - except AttributeError: - # raise 
NotImplementedError('Cannot determine version of this OpenSSL library.') - pass - return (version, hexversion, cflags) - - -class _OpenSSL(object): - """ - Wrapper for OpenSSL using ctypes - """ - # pylint: disable=too-many-statements, too-many-instance-attributes - def __init__(self, library): - """ - Build the wrapper - """ - self._lib = ctypes.CDLL(library) - self._version, self._hexversion, self._cflags = get_version(self._lib) - self._libreSSL = self._version.startswith(b"LibreSSL") - - self.pointer = ctypes.pointer - self.c_int = ctypes.c_int - self.byref = ctypes.byref - self.create_string_buffer = ctypes.create_string_buffer - - self.BN_new = self._lib.BN_new - self.BN_new.restype = ctypes.c_void_p - self.BN_new.argtypes = [] - - self.BN_free = self._lib.BN_free - self.BN_free.restype = None - self.BN_free.argtypes = [ctypes.c_void_p] - - self.BN_clear_free = self._lib.BN_clear_free - self.BN_clear_free.restype = None - self.BN_clear_free.argtypes = [ctypes.c_void_p] - - self.BN_num_bits = self._lib.BN_num_bits - self.BN_num_bits.restype = ctypes.c_int - self.BN_num_bits.argtypes = [ctypes.c_void_p] - - self.BN_bn2bin = self._lib.BN_bn2bin - self.BN_bn2bin.restype = ctypes.c_int - self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - try: - self.BN_bn2binpad = self._lib.BN_bn2binpad - self.BN_bn2binpad.restype = ctypes.c_int - self.BN_bn2binpad.argtypes = [ctypes.c_void_p, ctypes.c_void_p, - ctypes.c_int] - except AttributeError: - # optional, we have a workaround - pass - - self.BN_bin2bn = self._lib.BN_bin2bn - self.BN_bin2bn.restype = ctypes.c_void_p - self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p] - - self.EC_KEY_free = self._lib.EC_KEY_free - self.EC_KEY_free.restype = None - self.EC_KEY_free.argtypes = [ctypes.c_void_p] - - self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name - self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p - self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] - - 
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key - self.EC_KEY_generate_key.restype = ctypes.c_int - self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_check_key = self._lib.EC_KEY_check_key - self.EC_KEY_check_key.restype = ctypes.c_int - self.EC_KEY_check_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key - self.EC_KEY_get0_private_key.restype = ctypes.c_void_p - self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key - self.EC_KEY_get0_public_key.restype = ctypes.c_void_p - self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group - self.EC_KEY_get0_group.restype = ctypes.c_void_p - self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] - - self.EC_POINT_get_affine_coordinates_GFp = \ - self._lib.EC_POINT_get_affine_coordinates_GFp - self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int - self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_get_affine_coordinates = \ - self._lib.EC_POINT_get_affine_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_get_affine_coordinates = \ - self._lib.EC_POINT_get_affine_coordinates_GF2m - self.EC_POINT_get_affine_coordinates.restype = ctypes.c_int - self.EC_POINT_get_affine_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key - self.EC_KEY_set_private_key.restype = ctypes.c_int - self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key - self.EC_KEY_set_public_key.restype = ctypes.c_int - self.EC_KEY_set_public_key.argtypes = 
[ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_group = self._lib.EC_KEY_set_group - self.EC_KEY_set_group.restype = ctypes.c_int - self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_set_affine_coordinates_GFp = \ - self._lib.EC_POINT_set_affine_coordinates_GFp - self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int - self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_set_affine_coordinates = \ - self._lib.EC_POINT_set_affine_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_set_affine_coordinates = \ - self._lib.EC_POINT_set_affine_coordinates_GF2m - self.EC_POINT_set_affine_coordinates.restype = ctypes.c_int - self.EC_POINT_set_affine_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_set_compressed_coordinates = \ - self._lib.EC_POINT_set_compressed_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_set_compressed_coordinates = \ - self._lib.EC_POINT_set_compressed_coordinates_GF2m - self.EC_POINT_set_compressed_coordinates.restype = ctypes.c_int - self.EC_POINT_set_compressed_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_int, - ctypes.c_void_p] - - self.EC_POINT_new = self._lib.EC_POINT_new - self.EC_POINT_new.restype = ctypes.c_void_p - self.EC_POINT_new.argtypes = [ctypes.c_void_p] - - self.EC_POINT_free = self._lib.EC_POINT_free - self.EC_POINT_free.restype = None - self.EC_POINT_free.argtypes = [ctypes.c_void_p] - - self.BN_CTX_free = self._lib.BN_CTX_free - self.BN_CTX_free.restype = None - self.BN_CTX_free.argtypes = [ctypes.c_void_p] - - self.EC_POINT_mul = self._lib.EC_POINT_mul - self.EC_POINT_mul.restype = None - 
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key - self.EC_KEY_set_private_key.restype = ctypes.c_int - self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EC_KEY_OpenSSL = self._lib.EC_KEY_OpenSSL - self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p - self._lib.EC_KEY_OpenSSL.argtypes = [] - - self.EC_KEY_set_method = self._lib.EC_KEY_set_method - self._lib.EC_KEY_set_method.restype = ctypes.c_int - self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - else: - self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL - self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p - self._lib.ECDH_OpenSSL.argtypes = [] - - self.ECDH_set_method = self._lib.ECDH_set_method - self._lib.ECDH_set_method.restype = ctypes.c_int - self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.ECDH_compute_key = self._lib.ECDH_compute_key - self.ECDH_compute_key.restype = ctypes.c_int - self.ECDH_compute_key.argtypes = [ctypes.c_void_p, - ctypes.c_int, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex - self.EVP_CipherInit_ex.restype = ctypes.c_int - self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new - self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p - self.EVP_CIPHER_CTX_new.argtypes = [] - - # Cipher - self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128 - self.EVP_aes_128_cfb128.restype = ctypes.c_void_p - self.EVP_aes_128_cfb128.argtypes = [] - - self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128 - self.EVP_aes_256_cfb128.restype = ctypes.c_void_p - self.EVP_aes_256_cfb128.argtypes = [] - - self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc - self.EVP_aes_128_cbc.restype = 
ctypes.c_void_p - self.EVP_aes_128_cbc.argtypes = [] - - self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc - self.EVP_aes_256_cbc.restype = ctypes.c_void_p - self.EVP_aes_256_cbc.argtypes = [] - - # self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr - # self.EVP_aes_128_ctr.restype = ctypes.c_void_p - # self.EVP_aes_128_ctr.argtypes = [] - - # self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr - # self.EVP_aes_256_ctr.restype = ctypes.c_void_p - # self.EVP_aes_256_ctr.argtypes = [] - - self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb - self.EVP_aes_128_ofb.restype = ctypes.c_void_p - self.EVP_aes_128_ofb.argtypes = [] - - self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb - self.EVP_aes_256_ofb.restype = ctypes.c_void_p - self.EVP_aes_256_ofb.argtypes = [] - - self.EVP_bf_cbc = self._lib.EVP_bf_cbc - self.EVP_bf_cbc.restype = ctypes.c_void_p - self.EVP_bf_cbc.argtypes = [] - - self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64 - self.EVP_bf_cfb64.restype = ctypes.c_void_p - self.EVP_bf_cfb64.argtypes = [] - - self.EVP_rc4 = self._lib.EVP_rc4 - self.EVP_rc4.restype = ctypes.c_void_p - self.EVP_rc4.argtypes = [] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EVP_CIPHER_CTX_reset = self._lib.EVP_CIPHER_CTX_reset - self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int - self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p] - else: - self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup - self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int - self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p] - - self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free - self.EVP_CIPHER_CTX_free.restype = None - self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p] - - self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate - self.EVP_CipherUpdate.restype = ctypes.c_int - self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_int] - - self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex - 
self.EVP_CipherFinal_ex.restype = ctypes.c_int - self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestInit = self._lib.EVP_DigestInit - self.EVP_DigestInit.restype = ctypes.c_int - self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex - self.EVP_DigestInit_ex.restype = ctypes.c_int - self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p] - - self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate - self.EVP_DigestUpdate.restype = ctypes.c_int - self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_int] - - self.EVP_DigestFinal = self._lib.EVP_DigestFinal - self.EVP_DigestFinal.restype = ctypes.c_int - self.EVP_DigestFinal.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex - self.EVP_DigestFinal_ex.restype = ctypes.c_int - self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.ECDSA_sign = self._lib.ECDSA_sign - self.ECDSA_sign.restype = ctypes.c_int - self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.ECDSA_verify = self._lib.ECDSA_verify - self.ECDSA_verify.restype = ctypes.c_int - self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EVP_MD_CTX_new = self._lib.EVP_MD_CTX_new - self.EVP_MD_CTX_new.restype = ctypes.c_void_p - self.EVP_MD_CTX_new.argtypes = [] - - self.EVP_MD_CTX_reset = self._lib.EVP_MD_CTX_reset - self.EVP_MD_CTX_reset.restype = None - self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p] - - self.EVP_MD_CTX_free = self._lib.EVP_MD_CTX_free - self.EVP_MD_CTX_free.restype = None - self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p] - - self.EVP_sha1 = 
self._lib.EVP_sha1 - self.EVP_sha1.restype = ctypes.c_void_p - self.EVP_sha1.argtypes = [] - - self.digest_ecdsa_sha1 = self.EVP_sha1 - else: - self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create - self.EVP_MD_CTX_create.restype = ctypes.c_void_p - self.EVP_MD_CTX_create.argtypes = [] - - self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init - self.EVP_MD_CTX_init.restype = None - self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p] - - self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy - self.EVP_MD_CTX_destroy.restype = None - self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p] - - self.EVP_ecdsa = self._lib.EVP_ecdsa - self._lib.EVP_ecdsa.restype = ctypes.c_void_p - self._lib.EVP_ecdsa.argtypes = [] - - self.digest_ecdsa_sha1 = self.EVP_ecdsa - - self.RAND_bytes = self._lib.RAND_bytes - self.RAND_bytes.restype = ctypes.c_int - self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int] - - self.EVP_sha256 = self._lib.EVP_sha256 - self.EVP_sha256.restype = ctypes.c_void_p - self.EVP_sha256.argtypes = [] - - self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey - self.i2o_ECPublicKey.restype = ctypes.c_void_p - self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_sha512 = self._lib.EVP_sha512 - self.EVP_sha512.restype = ctypes.c_void_p - self.EVP_sha512.argtypes = [] - - self.HMAC = self._lib.HMAC - self.HMAC.restype = ctypes.c_void_p - self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_void_p] - - try: - self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC - except Exception: - # The above is not compatible with all versions of OSX. 
- self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1 - - self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int - self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p] - - # Blind signature requirements - self.BN_CTX_new = self._lib.BN_CTX_new - self.BN_CTX_new.restype = ctypes.c_void_p - self.BN_CTX_new.argtypes = [] - - self.BN_dup = self._lib.BN_dup - self.BN_dup.restype = ctypes.c_void_p - self.BN_dup.argtypes = [ctypes.c_void_p] - - self.BN_rand = self._lib.BN_rand - self.BN_rand.restype = ctypes.c_int - self.BN_rand.argtypes = [ctypes.c_void_p, - ctypes.c_int, - ctypes.c_int] - - self.BN_set_word = self._lib.BN_set_word - self.BN_set_word.restype = ctypes.c_int - self.BN_set_word.argtypes = [ctypes.c_void_p, - ctypes.c_ulong] - - self.BN_mul = self._lib.BN_mul - self.BN_mul.restype = ctypes.c_int - self.BN_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_add = self._lib.BN_mod_add - self.BN_mod_add.restype = ctypes.c_int - self.BN_mod_add.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_inverse = self._lib.BN_mod_inverse - self.BN_mod_inverse.restype = ctypes.c_void_p - self.BN_mod_inverse.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_mul = self._lib.BN_mod_mul - self.BN_mod_mul.restype = ctypes.c_int - self.BN_mod_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_lshift = self._lib.BN_lshift - self.BN_lshift.restype = ctypes.c_int - self.BN_lshift.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_int] - - self.BN_sub_word = self._lib.BN_sub_word - self.BN_sub_word.restype = ctypes.c_int - self.BN_sub_word.argtypes = [ctypes.c_void_p, - ctypes.c_ulong] - - self.BN_cmp = self._lib.BN_cmp - 
self.BN_cmp.restype = ctypes.c_int - self.BN_cmp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.BN_is_odd = self._lib.BN_is_odd - self.BN_is_odd.restype = ctypes.c_int - self.BN_is_odd.argtypes = [ctypes.c_void_p] - except AttributeError: - # OpenSSL 1.1.0 implements this as a function, but earlier - # versions as macro, so we need to workaround - self.BN_is_odd = self.BN_is_odd_compatible - - self.BN_bn2dec = self._lib.BN_bn2dec - self.BN_bn2dec.restype = ctypes.c_char_p - self.BN_bn2dec.argtypes = [ctypes.c_void_p] - - self.EC_GROUP_new_by_curve_name = self._lib.EC_GROUP_new_by_curve_name - self.EC_GROUP_new_by_curve_name.restype = ctypes.c_void_p - self.EC_GROUP_new_by_curve_name.argtypes = [ctypes.c_int] - - self.EC_GROUP_get_order = self._lib.EC_GROUP_get_order - self.EC_GROUP_get_order.restype = ctypes.c_int - self.EC_GROUP_get_order.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_GROUP_get_cofactor = self._lib.EC_GROUP_get_cofactor - self.EC_GROUP_get_cofactor.restype = ctypes.c_int - self.EC_GROUP_get_cofactor.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_GROUP_get0_generator = self._lib.EC_GROUP_get0_generator - self.EC_GROUP_get0_generator.restype = ctypes.c_void_p - self.EC_GROUP_get0_generator.argtypes = [ctypes.c_void_p] - - self.EC_POINT_copy = self._lib.EC_POINT_copy - self.EC_POINT_copy.restype = ctypes.c_int - self.EC_POINT_copy.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_add = self._lib.EC_POINT_add - self.EC_POINT_add.restype = ctypes.c_int - self.EC_POINT_add.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_cmp = self._lib.EC_POINT_cmp - self.EC_POINT_cmp.restype = ctypes.c_int - self.EC_POINT_cmp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_set_to_infinity = self._lib.EC_POINT_set_to_infinity - 
self.EC_POINT_set_to_infinity.restype = ctypes.c_int - self.EC_POINT_set_to_infinity.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self._set_ciphers() - self._set_curves() - - def _set_ciphers(self): - self.cipher_algo = { - 'aes-128-cbc': CipherName( - 'aes-128-cbc', self.EVP_aes_128_cbc, 16), - 'aes-256-cbc': CipherName( - 'aes-256-cbc', self.EVP_aes_256_cbc, 16), - 'aes-128-cfb': CipherName( - 'aes-128-cfb', self.EVP_aes_128_cfb128, 16), - 'aes-256-cfb': CipherName( - 'aes-256-cfb', self.EVP_aes_256_cfb128, 16), - 'aes-128-ofb': CipherName( - 'aes-128-ofb', self._lib.EVP_aes_128_ofb, 16), - 'aes-256-ofb': CipherName( - 'aes-256-ofb', self._lib.EVP_aes_256_ofb, 16), - # 'aes-128-ctr': CipherName( - # 'aes-128-ctr', self._lib.EVP_aes_128_ctr, 16), - # 'aes-256-ctr': CipherName( - # 'aes-256-ctr', self._lib.EVP_aes_256_ctr, 16), - 'bf-cfb': CipherName( - 'bf-cfb', self.EVP_bf_cfb64, 8), - 'bf-cbc': CipherName( - 'bf-cbc', self.EVP_bf_cbc, 8), - # 128 is the initialisation size not block size - 'rc4': CipherName( - 'rc4', self.EVP_rc4, 128), - } - - def _set_curves(self): - self.curves = { - 'secp112r1': 704, - 'secp112r2': 705, - 'secp128r1': 706, - 'secp128r2': 707, - 'secp160k1': 708, - 'secp160r1': 709, - 'secp160r2': 710, - 'secp192k1': 711, - 'secp224k1': 712, - 'secp224r1': 713, - 'secp256k1': 714, - 'secp384r1': 715, - 'secp521r1': 716, - 'sect113r1': 717, - 'sect113r2': 718, - 'sect131r1': 719, - 'sect131r2': 720, - 'sect163k1': 721, - 'sect163r1': 722, - 'sect163r2': 723, - 'sect193r1': 724, - 'sect193r2': 725, - 'sect233k1': 726, - 'sect233r1': 727, - 'sect239k1': 728, - 'sect283k1': 729, - 'sect283r1': 730, - 'sect409k1': 731, - 'sect409r1': 732, - 'sect571k1': 733, - 'sect571r1': 734, - } - - def BN_num_bytes(self, x): - """ - returns the length of a BN (OpenSSl API) - """ - return int((self.BN_num_bits(x) + 7) / 8) - - def BN_is_odd_compatible(self, x): - """ - returns if BN is odd - we assume big endianness, and that BN is initialised - """ - 
length = self.BN_num_bytes(x) - data = self.malloc(0, length) - OpenSSL.BN_bn2bin(x, data) - return ord(data[length - 1]) & 1 - - def get_cipher(self, name): - """ - returns the OpenSSL cipher instance - """ - if name not in self.cipher_algo: - raise Exception("Unknown cipher") - return self.cipher_algo[name] - - def get_curve(self, name): - """ - returns the id of a elliptic curve - """ - if name not in self.curves: - raise Exception("Unknown curve") - return self.curves[name] - - def get_curve_by_id(self, id_): - """ - returns the name of a elliptic curve with his id - """ - res = None - for i in self.curves: - if self.curves[i] == id_: - res = i - break - if res is None: - raise Exception("Unknown curve") - return res - - def rand(self, size): - """ - OpenSSL random function - """ - buffer_ = self.malloc(0, size) - # This pyelliptic library, by default, didn't check the return value - # of RAND_bytes. It is evidently possible that it returned an error - # and not-actually-random data. However, in tests on various - # operating systems, while generating hundreds of gigabytes of random - # strings of various sizes I could not get an error to occur. - # Also Bitcoin doesn't check the return value of RAND_bytes either. 
- # Fixed in Bitmessage version 0.4.2 (in source code on 2013-10-13) - while self.RAND_bytes(buffer_, size) != 1: - import time - time.sleep(1) - return buffer_.raw - - def malloc(self, data, size): - """ - returns a create_string_buffer (ctypes) - """ - buffer_ = None - if data != 0: - if sys.version_info.major == 3 and isinstance(data, type('')): - data = data.encode() - buffer_ = self.create_string_buffer(data, size) - else: - buffer_ = self.create_string_buffer(size) - return buffer_ - - -def loadOpenSSL(): - """This function finds and load the OpenSSL library""" - # pylint: disable=global-statement - global OpenSSL - from os import path, environ - from ctypes.util import find_library - - libdir = [] - if getattr(sys, 'frozen', None): - if 'darwin' in sys.platform: - libdir.extend([ - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.1.0.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.2.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.1.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.0.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.0.9.8.dylib'), - ]) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append(path.join(sys._MEIPASS, 'libeay32.dll')) - else: - libdir.extend([ - path.join(sys._MEIPASS, 'libcrypto.so'), - path.join(sys._MEIPASS, 'libssl.so'), - path.join(sys._MEIPASS, 'libcrypto.so.1.1.0'), - path.join(sys._MEIPASS, 'libssl.so.1.1.0'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.2'), - path.join(sys._MEIPASS, 'libssl.so.1.0.2'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.1'), - path.join(sys._MEIPASS, 'libssl.so.1.0.1'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.0'), - path.join(sys._MEIPASS, 'libssl.so.1.0.0'), - path.join(sys._MEIPASS, 'libcrypto.so.0.9.8'), - 
path.join(sys._MEIPASS, 'libssl.so.0.9.8'), - ]) - if 'darwin' in sys.platform: - libdir.extend([ - 'libcrypto.dylib', '/usr/local/opt/openssl/lib/libcrypto.dylib']) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append('libeay32.dll') - else: - libdir.append('libcrypto.so') - libdir.append('libssl.so') - libdir.append('libcrypto.so.1.0.0') - libdir.append('libssl.so.1.0.0') - if 'linux' in sys.platform or 'darwin' in sys.platform \ - or 'bsd' in sys.platform: - libdir.append(find_library('ssl')) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append(find_library('libeay32')) - for library in libdir: - try: - OpenSSL = _OpenSSL(library) - return - except Exception: - pass - raise Exception( - "Couldn't find and load the OpenSSL library. You must install it.") - - -loadOpenSSL() diff --git a/src/tests/mock/pybitmessage/pathmagic.py b/src/tests/mock/pybitmessage/pathmagic.py deleted file mode 100644 index 3f32c0c1..00000000 --- a/src/tests/mock/pybitmessage/pathmagic.py +++ /dev/null @@ -1,10 +0,0 @@ -import os -import sys - - -def setup(): - """Add path to this file to sys.path""" - app_dir = os.path.dirname(os.path.abspath(__file__)) - os.chdir(app_dir) - sys.path.insert(0, app_dir) - return app_dir diff --git a/src/tests/mock/pybitmessage/paths.py b/src/tests/mock/pybitmessage/paths.py deleted file mode 100644 index e2f8c97e..00000000 --- a/src/tests/mock/pybitmessage/paths.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -Path related functions -""" -import logging -import os -import re -import sys -from datetime import datetime -from shutil import move - -logger = logging.getLogger('default') - -# When using py2exe or py2app, the variable frozen is added to the sys -# namespace. This can be used to setup a different code path for -# binary distributions vs source distributions. 
-frozen = getattr(sys, 'frozen', None) - - -def lookupExeFolder(): - """Returns executable folder path""" - if frozen: - exeFolder = ( - # targetdir/Bitmessage.app/Contents/MacOS/Bitmessage - os.path.dirname(sys.executable).split(os.path.sep)[0] + os.path.sep - if frozen == "macosx_app" else - os.path.dirname(sys.executable) + os.path.sep) - elif __file__: - exeFolder = os.path.dirname(__file__) + os.path.sep - else: - exeFolder = '' - return exeFolder - - -def lookupAppdataFolder(): - """Returns path of the folder where application data is stored""" - APPNAME = "PyBitmessage" - dataFolder = os.environ.get('BITMESSAGE_HOME') - if dataFolder: - if dataFolder[-1] not in (os.path.sep, os.path.altsep): - dataFolder += os.path.sep - elif sys.platform == 'darwin': - try: - dataFolder = os.path.join( - os.environ['HOME'], - 'Library/Application Support/', APPNAME - ) + '/' - - except KeyError: - sys.exit( - 'Could not find home folder, please report this message' - ' and your OS X version to the BitMessage Github.') - elif 'win32' in sys.platform or 'win64' in sys.platform: - dataFolder = os.path.join( - os.environ['APPDATA'].decode( - sys.getfilesystemencoding(), 'ignore'), APPNAME - ) + os.path.sep - else: - try: - dataFolder = os.path.join(os.environ['XDG_CONFIG_HOME'], APPNAME) - except KeyError: - dataFolder = os.path.join(os.environ['HOME'], '.config', APPNAME) - - # Migrate existing data to the proper location - # if this is an existing install - try: - move(os.path.join(os.environ['HOME'], '.%s' % APPNAME), dataFolder) - logger.info('Moving data folder to %s', dataFolder) - except IOError: - # Old directory may not exist. 
- pass - dataFolder = dataFolder + os.path.sep - return dataFolder - - -def codePath(): - """Returns path to the program sources""" - if not frozen: - return os.path.dirname(__file__) - return ( - os.environ.get('RESOURCEPATH') - # pylint: disable=protected-access - if frozen == "macosx_app" else sys._MEIPASS) - - -def tail(f, lines=20): - """Returns last lines in the f file object""" - total_lines_wanted = lines - - BLOCK_SIZE = 1024 - f.seek(0, 2) - block_end_byte = f.tell() - lines_to_go = total_lines_wanted - block_number = -1 - # blocks of size BLOCK_SIZE, in reverse order starting - # from the end of the file - blocks = [] - while lines_to_go > 0 and block_end_byte > 0: - if block_end_byte - BLOCK_SIZE > 0: - # read the last block we haven't yet read - f.seek(block_number * BLOCK_SIZE, 2) - blocks.append(f.read(BLOCK_SIZE)) - else: - # file too small, start from begining - f.seek(0, 0) - # only read what was not read - blocks.append(f.read(block_end_byte)) - lines_found = blocks[-1].count('\n') - lines_to_go -= lines_found - block_end_byte -= BLOCK_SIZE - block_number -= 1 - all_read_text = ''.join(reversed(blocks)) - return '\n'.join(all_read_text.splitlines()[-total_lines_wanted:]) - - -def lastCommit(): - """ - Returns last commit information as dict with 'commit' and 'time' keys - """ - githeadfile = os.path.join(codePath(), '..', '.git', 'logs', 'HEAD') - result = {} - if os.path.isfile(githeadfile): - try: - with open(githeadfile, 'rt') as githead: - line = tail(githead, 1) - result['commit'] = line.split()[1] - result['time'] = datetime.fromtimestamp( - float(re.search(r'>\s*(.*?)\s', line).group(1)) - ) - except (IOError, AttributeError, TypeError): - pass - return result diff --git a/src/tests/mock/pybitmessage/proofofwork.py b/src/tests/mock/pybitmessage/proofofwork.py deleted file mode 100644 index 148d6734..00000000 --- a/src/tests/mock/pybitmessage/proofofwork.py +++ /dev/null @@ -1,394 +0,0 @@ -# pylint: 
disable=too-many-branches,too-many-statements,protected-access -""" -Proof of work calculation -""" - -import ctypes -import hashlib -import os -import sys -import tempfile -import time -from struct import pack, unpack -from subprocess import call - -import openclpow -import paths -import queues -import state -import tr -from bmconfigparser import BMConfigParser -from debug import logger - -bitmsglib = 'bitmsghash.so' -bmpow = None - - -class LogOutput(object): # pylint: disable=too-few-public-methods - """ - A context manager that block stdout for its scope - and appends it's content to log before exit. Usage:: - - with LogOutput(): - os.system('ls -l') - - https://stackoverflow.com/questions/5081657 - """ - - def __init__(self, prefix='PoW'): - self.prefix = prefix - try: - sys.stdout.flush() - self._stdout = sys.stdout - self._stdout_fno = os.dup(sys.stdout.fileno()) - except AttributeError: - # NullWriter instance has no attribute 'fileno' on Windows - self._stdout = None - else: - self._dst, self._filepath = tempfile.mkstemp() - - def __enter__(self): - if not self._stdout: - return - stdout = os.dup(1) - os.dup2(self._dst, 1) - os.close(self._dst) - sys.stdout = os.fdopen(stdout, 'w') - - def __exit__(self, exc_type, exc_val, exc_tb): - if not self._stdout: - return - sys.stdout.close() - sys.stdout = self._stdout - sys.stdout.flush() - os.dup2(self._stdout_fno, 1) - - with open(self._filepath) as out: - for line in out: - logger.info('%s: %s', self.prefix, line) - os.remove(self._filepath) - - -def _set_idle(): - if 'linux' in sys.platform: - os.nice(20) - else: - try: - # pylint: disable=no-member,import-error - sys.getwindowsversion() - import win32api - import win32process - import win32con - pid = win32api.GetCurrentProcessId() - handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid) - win32process.SetPriorityClass(handle, win32process.IDLE_PRIORITY_CLASS) - except: # noqa:E722 - # Windows 64-bit - pass - - -def _pool_worker(nonce, 
initialHash, target, pool_size): - _set_idle() - trialValue = float('inf') - while trialValue > target: - nonce += pool_size - trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512( - pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) - return [trialValue, nonce] - - -def _doSafePoW(target, initialHash): - logger.debug("Safe PoW start") - nonce = 0 - trialValue = float('inf') - while trialValue > target and state.shutdown == 0: - nonce += 1 - trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512( - pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) - if state.shutdown != 0: - raise StopIteration("Interrupted") # pylint: misplaced-bare-raise - logger.debug("Safe PoW done") - return [trialValue, nonce] - - -def _doFastPoW(target, initialHash): - logger.debug("Fast PoW start") - from multiprocessing import Pool, cpu_count - try: - pool_size = cpu_count() - except: # noqa:E722 - pool_size = 4 - try: - maxCores = BMConfigParser().getint('bitmessagesettings', 'maxcores') - except: # noqa:E722 - maxCores = 99999 - if pool_size > maxCores: - pool_size = maxCores - - pool = Pool(processes=pool_size) - result = [] - for i in range(pool_size): - result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size))) - - while True: - if state.shutdown > 0: - try: - pool.terminate() - pool.join() - except: # noqa:E722 - pass - raise StopIteration("Interrupted") - for i in range(pool_size): - if result[i].ready(): - try: - result[i].successful() - except AssertionError: - pool.terminate() - pool.join() - raise StopIteration("Interrupted") - result = result[i].get() - pool.terminate() - pool.join() - logger.debug("Fast PoW done") - return result[0], result[1] - time.sleep(0.2) - - -def _doCPoW(target, initialHash): - with LogOutput(): - h = initialHash - m = target - out_h = ctypes.pointer(ctypes.create_string_buffer(h, 64)) - out_m = ctypes.c_ulonglong(m) - logger.debug("C PoW start") - nonce = bmpow(out_h, out_m) - - trialValue, = 
unpack('>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) - if state.shutdown != 0: - raise StopIteration("Interrupted") - logger.debug("C PoW done") - return [trialValue, nonce] - - -def _doGPUPoW(target, initialHash): - logger.debug("GPU PoW start") - nonce = openclpow.do_opencl_pow(initialHash.encode("hex"), target) - trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(pack('>Q', nonce) + initialHash).digest()).digest()[0:8]) - if trialValue > target: - deviceNames = ", ".join(gpu.name for gpu in openclpow.enabledGpus) - queues.UISignalQueue.put(( - 'updateStatusBar', ( - tr._translate( - "MainWindow", - 'Your GPU(s) did not calculate correctly, disabling OpenCL. Please report to the developers.' - ), - 1))) - logger.error( - "Your GPUs (%s) did not calculate correctly, disabling OpenCL. Please report to the developers.", - deviceNames) - openclpow.enabledGpus = [] - raise Exception("GPU did not calculate correctly.") - if state.shutdown != 0: - raise StopIteration("Interrupted") - logger.debug("GPU PoW done") - return [trialValue, nonce] - - -def estimate(difficulty, format=False): # pylint: disable=redefined-builtin - """ - .. 
todo: fix unused variable - """ - ret = difficulty / 10 - if ret < 1: - ret = 1 - - if format: - # pylint: disable=unused-variable - out = str(int(ret)) + " seconds" - if ret > 60: - ret /= 60 - out = str(int(ret)) + " minutes" - if ret > 60: - ret /= 60 - out = str(int(ret)) + " hours" - if ret > 24: - ret /= 24 - out = str(int(ret)) + " days" - if ret > 7: - out = str(int(ret)) + " weeks" - if ret > 31: - out = str(int(ret)) + " months" - if ret > 366: - ret /= 366 - out = str(int(ret)) + " years" - ret = None # Ensure legacy behaviour - - return ret - - -def getPowType(): - """Get the proof of work implementation""" - - if openclpow.openclEnabled(): - return "OpenCL" - if bmpow: - return "C" - return "python" - - -def notifyBuild(tried=False): - """Notify the user of the success or otherwise of building the PoW C module""" - - if bmpow: - queues.UISignalQueue.put(('updateStatusBar', (tr._translate( - "proofofwork", "C PoW module built successfully."), 1))) - elif tried: - queues.UISignalQueue.put( - ( - 'updateStatusBar', ( - tr._translate( - "proofofwork", - "Failed to build C PoW module. Please build it manually." - ), - 1 - ) - ) - ) - else: - queues.UISignalQueue.put(('updateStatusBar', (tr._translate( - "proofofwork", "C PoW module unavailable. 
Please build it."), 1))) - - -def buildCPoW(): - """Attempt to build the PoW C module""" - if bmpow is not None: - return - if paths.frozen is not None: - notifyBuild(False) - return - if sys.platform in ["win32", "win64"]: - notifyBuild(False) - return - try: - if "bsd" in sys.platform: - # BSD make - call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash"), '-f', 'Makefile.bsd']) - else: - # GNU make - call(["make", "-C", os.path.join(paths.codePath(), "bitmsghash")]) - if os.path.exists(os.path.join(paths.codePath(), "bitmsghash", "bitmsghash.so")): - init() - notifyBuild(True) - else: - notifyBuild(True) - except: # noqa:E722 - notifyBuild(True) - - -def run(target, initialHash): - """Run the proof of work thread""" - - if state.shutdown != 0: - raise # pylint: disable=misplaced-bare-raise - target = int(target) - if openclpow.openclEnabled(): - try: - return _doGPUPoW(target, initialHash) - except StopIteration: - raise - except: # noqa:E722 - pass # fallback - if bmpow: - try: - return _doCPoW(target, initialHash) - except StopIteration: - raise - except: # noqa:E722 - pass # fallback - if paths.frozen == "macosx_app" or not paths.frozen: - # on my (Peter Surda) Windows 10, Windows Defender - # does not like this and fights with PyBitmessage - # over CPU, resulting in very slow PoW - # added on 2015-11-29: multiprocesing.freeze_support() doesn't help - try: - return _doFastPoW(target, initialHash) - except StopIteration: - logger.error("Fast PoW got StopIteration") - raise - except: # noqa:E722 - logger.error("Fast PoW got exception:", exc_info=True) - try: - return _doSafePoW(target, initialHash) - except StopIteration: - raise - except: # noqa:E722 - pass # fallback - - -def resetPoW(): - """Initialise the OpenCL PoW""" - openclpow.initCL() - - -# init - - -def init(): - """Initialise PoW""" - # pylint: disable=global-statement - global bitmsglib, bmpow - - openclpow.initCL() - if sys.platform == "win32": - if ctypes.sizeof(ctypes.c_voidp) == 4: - 
bitmsglib = 'bitmsghash32.dll' - else: - bitmsglib = 'bitmsghash64.dll' - try: - # MSVS - bso = ctypes.WinDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) - logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib) - bmpow = bso.BitmessagePOW - bmpow.restype = ctypes.c_ulonglong - _doCPoW(2**63, "") - logger.info("Successfully tested C PoW DLL (stdcall) %s", bitmsglib) - except ValueError: - try: - # MinGW - bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) - logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib) - bmpow = bso.BitmessagePOW - bmpow.restype = ctypes.c_ulonglong - _doCPoW(2**63, "") - logger.info("Successfully tested C PoW DLL (cdecl) %s", bitmsglib) - except Exception as e: - logger.error("Error: %s", e, exc_info=True) - bso = None - except Exception as e: - logger.error("Error: %s", e, exc_info=True) - bso = None - else: - try: - bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib)) - except OSError: - import glob - try: - bso = ctypes.CDLL(glob.glob(os.path.join( - paths.codePath(), "bitmsghash", "bitmsghash*.so" - ))[0]) - except (OSError, IndexError): - bso = None - except: # noqa:E722 - bso = None - else: - logger.info("Loaded C PoW DLL %s", bitmsglib) - if bso: - try: - bmpow = bso.BitmessagePOW - bmpow.restype = ctypes.c_ulonglong - except: # noqa:E722 - bmpow = None - else: - bmpow = None - if bmpow is None: - buildCPoW() diff --git a/src/tests/mock/pybitmessage/protocol.py b/src/tests/mock/pybitmessage/protocol.py deleted file mode 100644 index 1934d9cc..00000000 --- a/src/tests/mock/pybitmessage/protocol.py +++ /dev/null @@ -1,524 +0,0 @@ -""" -Low-level protocol-related functions. 
-""" -# pylint: disable=too-many-boolean-expressions,too-many-return-statements -# pylint: disable=too-many-locals,too-many-statements - -import base64 -import hashlib -import random -import socket -import sys -import time -from binascii import hexlify -from struct import Struct, pack, unpack - -import defaults -import highlevelcrypto -import state -from addresses import ( - encodeVarint, decodeVarint, decodeAddress, varintDecodeError) -from bmconfigparser import BMConfigParser -from debug import logger -from fallback import RIPEMD160Hash -from helper_sql import sqlExecute -from version import softwareVersion - -# Service flags -#: This is a normal network node -NODE_NETWORK = 1 -#: This node supports SSL/TLS in the current connect (python < 2.7.9 -#: only supports an SSL client, so in that case it would only have this -#: on when the connection is a client). -NODE_SSL = 2 -# (Proposal) This node may do PoW on behalf of some its peers -# (PoW offloading/delegating), but it doesn't have to. Clients may have -# to meet additional requirements (e.g. TLS authentication) -# NODE_POW = 4 -#: Node supports dandelion -NODE_DANDELION = 8 - -# Bitfield flags -BITFIELD_DOESACK = 1 - -# Error types -STATUS_WARNING = 0 -STATUS_ERROR = 1 -STATUS_FATAL = 2 - -# Object types -OBJECT_GETPUBKEY = 0 -OBJECT_PUBKEY = 1 -OBJECT_MSG = 2 -OBJECT_BROADCAST = 3 -OBJECT_ONIONPEER = 0x746f72 -OBJECT_I2P = 0x493250 -OBJECT_ADDR = 0x61646472 - -eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack( - '>Q', random.randrange(1, 18446744073709551615)) - -# Compiled struct for packing/unpacking headers -# New code should use CreatePacket instead of Header.pack -Header = Struct('!L12sL4s') - -VersionPacket = Struct('>LqQ20s4s36sH') - -# Bitfield - - -def getBitfield(address): - """Get a bitfield from an address""" - # bitfield of features supported by me (see the wiki). 
- bitfield = 0 - # send ack - if not BMConfigParser().safeGetBoolean(address, 'dontsendack'): - bitfield |= BITFIELD_DOESACK - return pack('>I', bitfield) - - -def checkBitfield(bitfieldBinary, flags): - """Check if a bitfield matches the given flags""" - bitfield, = unpack('>I', bitfieldBinary) - return (bitfield & flags) == flags - - -def isBitSetWithinBitfield(fourByteString, n): - """Check if a particular bit is set in a bitfeld""" - # Uses MSB 0 bit numbering across 4 bytes of data - n = 31 - n - x, = unpack('>L', fourByteString) - return x & 2**n != 0 - - -# IP addresses - - -def encodeHost(host): - """Encode a given host to be used in low-level socket operations""" - if host.find('.onion') > -1: - return b'\xfd\x87\xd8\x7e\xeb\x43' + base64.b32decode( - host.split(".")[0], True) - elif host.find(':') == -1: - return b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + \ - socket.inet_aton(host) - return socket.inet_pton(socket.AF_INET6, host) - - -def networkType(host): - """Determine if a host is IPv4, IPv6 or an onion address""" - if host.find('.onion') > -1: - return 'onion' - elif host.find(':') == -1: - return 'IPv4' - return 'IPv6' - - -def network_group(host): - """Canonical identifier of network group - simplified, borrowed from - GetGroup() in src/netaddresses.cpp in bitcoin core""" - if not isinstance(host, str): - return None - network_type = networkType(host) - try: - raw_host = encodeHost(host) - except socket.error: - return host - if network_type == 'IPv4': - decoded_host = checkIPv4Address(raw_host[12:], True) - if decoded_host: - # /16 subnet - return raw_host[12:14] - elif network_type == 'IPv6': - decoded_host = checkIPv6Address(raw_host, True) - if decoded_host: - # /32 subnet - return raw_host[0:12] - else: - # just host, e.g. 
for tor - return host - # global network type group for local, private, unroutable - return network_type - - -def checkIPAddress(host, private=False): - """ - Returns hostStandardFormat if it is a valid IP address, - otherwise returns False - """ - if host[0:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF': - hostStandardFormat = socket.inet_ntop(socket.AF_INET, host[12:]) - return checkIPv4Address(host[12:], hostStandardFormat, private) - elif host[0:6] == b'\xfd\x87\xd8\x7e\xeb\x43': - # Onion, based on BMD/bitcoind - hostStandardFormat = base64.b32encode(host[6:]).lower() + ".onion" - if private: - return False - return hostStandardFormat - else: - try: - hostStandardFormat = socket.inet_ntop(socket.AF_INET6, host) - except ValueError: - return False - if len(hostStandardFormat) == 0: - # This can happen on Windows systems which are - # not 64-bit compatible so let us drop the IPv6 address. - return False - return checkIPv6Address(host, hostStandardFormat, private) - - -def checkIPv4Address(host, hostStandardFormat, private=False): - """ - Returns hostStandardFormat if it is an IPv4 address, - otherwise returns False - """ - if host[0:1] == b'\x7F': # 127/8 - if not private: - logger.debug( - 'Ignoring IP address in loopback range: %s', - hostStandardFormat) - return hostStandardFormat if private else False - if host[0:1] == b'\x0A': # 10/8 - if not private: - logger.debug( - 'Ignoring IP address in private range: %s', hostStandardFormat) - return hostStandardFormat if private else False - if host[0:2] == b'\xC0\xA8': # 192.168/16 - if not private: - logger.debug( - 'Ignoring IP address in private range: %s', hostStandardFormat) - return hostStandardFormat if private else False - if host[0:2] >= b'\xAC\x10' and host[0:2] < b'\xAC\x20': # 172.16/12 - if not private: - logger.debug( - 'Ignoring IP address in private range: %s', hostStandardFormat) - return hostStandardFormat if private else False - return False if private else hostStandardFormat - - -def 
checkIPv6Address(host, hostStandardFormat, private=False): - """ - Returns hostStandardFormat if it is an IPv6 address, - otherwise returns False - """ - if host == b'\x00' * 15 + b'\x01': - if not private: - logger.debug('Ignoring loopback address: %s', hostStandardFormat) - return False - try: - host = [ord(c) for c in host[:2]] - except TypeError: # python3 has ints already - pass - if host[0] == 0xfe and host[1] & 0xc0 == 0x80: - if not private: - logger.debug('Ignoring local address: %s', hostStandardFormat) - return hostStandardFormat if private else False - if host[0] & 0xfe == 0xfc: - if not private: - logger.debug( - 'Ignoring unique local address: %s', hostStandardFormat) - return hostStandardFormat if private else False - return False if private else hostStandardFormat - - -def haveSSL(server=False): - """ - Predicate to check if ECDSA server support is required and available - - python < 2.7.9's ssl library does not support ECDSA server due to - missing initialisation of available curves, but client works ok - """ - if not server: - return True - elif sys.version_info >= (2, 7, 9): - return True - return False - - -def checkSocksIP(host): - """Predicate to check if we're using a SOCKS proxy""" - sockshostname = BMConfigParser().safeGet( - 'bitmessagesettings', 'sockshostname') - try: - if not state.socksIP: - state.socksIP = socket.gethostbyname(sockshostname) - except NameError: # uninitialised - state.socksIP = socket.gethostbyname(sockshostname) - except (TypeError, socket.gaierror): # None, resolving failure - state.socksIP = sockshostname - return state.socksIP == host - - -def isProofOfWorkSufficient( - data, nonceTrialsPerByte=0, payloadLengthExtraBytes=0, recvTime=0): - """ - Validate an object's Proof of Work using method described - `here `_ - - Arguments: - int nonceTrialsPerByte (default: from `.defaults`) - int payloadLengthExtraBytes (default: from `.defaults`) - float recvTime (optional) UNIX epoch time when object was - received from the 
network (default: current system time) - Returns: - True if PoW valid and sufficient, False in all other cases - """ - if nonceTrialsPerByte < defaults.networkDefaultProofOfWorkNonceTrialsPerByte: - nonceTrialsPerByte = defaults.networkDefaultProofOfWorkNonceTrialsPerByte - if payloadLengthExtraBytes < defaults.networkDefaultPayloadLengthExtraBytes: - payloadLengthExtraBytes = defaults.networkDefaultPayloadLengthExtraBytes - endOfLifeTime, = unpack('>Q', data[8:16]) - TTL = endOfLifeTime - (int(recvTime) if recvTime else int(time.time())) - if TTL < 300: - TTL = 300 - POW, = unpack('>Q', hashlib.sha512(hashlib.sha512( - data[:8] + hashlib.sha512(data[8:]).digest() - ).digest()).digest()[0:8]) - return POW <= 2 ** 64 / ( - nonceTrialsPerByte * ( - len(data) + payloadLengthExtraBytes - + ((TTL * (len(data) + payloadLengthExtraBytes)) / (2 ** 16)))) - - -# Packet creation - - -def CreatePacket(command, payload=b''): - """Construct and return a packet""" - payload_length = len(payload) - checksum = hashlib.sha512(payload).digest()[0:4] - - b = bytearray(Header.size + payload_length) - Header.pack_into(b, 0, 0xE9BEB4D9, command, payload_length, checksum) - b[Header.size:] = payload - return bytes(b) - - -def assembleVersionMessage( - remoteHost, remotePort, participatingStreams, server=False, nodeid=None -): - """ - Construct the payload of a version message, - return the resulting bytes of running `CreatePacket` on it - """ - payload = b'' - payload += pack('>L', 3) # protocol version. - # bitflags of the services I offer. - payload += pack( - '>q', - NODE_NETWORK - | (NODE_SSL if haveSSL(server) else 0) - | (NODE_DANDELION if state.dandelion else 0) - ) - payload += pack('>q', int(time.time())) - - # boolservices of remote connection; ignored by the remote host. 
- payload += pack('>q', 1) - if checkSocksIP(remoteHost) and server: - # prevent leaking of tor outbound IP - payload += encodeHost('127.0.0.1') - payload += pack('>H', 8444) - else: - # use first 16 bytes if host data is longer - # for example in case of onion v3 service - try: - payload += encodeHost(remoteHost)[:16] - except socket.error: - payload += encodeHost('127.0.0.1') - payload += pack('>H', remotePort) # remote IPv6 and port - - # bitflags of the services I offer. - payload += pack( - '>q', - NODE_NETWORK - | (NODE_SSL if haveSSL(server) else 0) - | (NODE_DANDELION if state.dandelion else 0) - ) - # = 127.0.0.1. This will be ignored by the remote host. - # The actual remote connected IP will be used. - payload += b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack( - '>L', 2130706433) - # we have a separate extPort and incoming over clearnet - # or outgoing through clearnet - extport = BMConfigParser().safeGetInt('bitmessagesettings', 'extport') - if ( - extport and ((server and not checkSocksIP(remoteHost)) or ( - BMConfigParser().get('bitmessagesettings', 'socksproxytype') - == 'none' and not server)) - ): - payload += pack('>H', extport) - elif checkSocksIP(remoteHost) and server: # incoming connection over Tor - payload += pack( - '>H', BMConfigParser().getint('bitmessagesettings', 'onionport')) - else: # no extport and not incoming over Tor - payload += pack( - '>H', BMConfigParser().getint('bitmessagesettings', 'port')) - - if nodeid is not None: - payload += nodeid[0:8] - else: - payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf - userAgent = ('/PyBitmessage:%s/' % softwareVersion).encode('utf-8') - payload += encodeVarint(len(userAgent)) - payload += userAgent - - # Streams - payload += encodeVarint(len(participatingStreams)) - count = 0 - for stream in sorted(participatingStreams): - payload += encodeVarint(stream) - count += 1 - # protocol limit, see specification - if count >= 160000: - break - - return 
CreatePacket(b'version', payload) - - -def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''): - """ - Construct the payload of an error message, - return the resulting bytes of running `CreatePacket` on it - """ - payload = encodeVarint(fatal) - payload += encodeVarint(banTime) - payload += encodeVarint(len(inventoryVector)) - payload += inventoryVector - payload += encodeVarint(len(errorText)) - payload += errorText - return CreatePacket(b'error', payload) - - -# Packet decoding - - -def decryptAndCheckPubkeyPayload(data, address): - """ - Version 4 pubkeys are encrypted. This function is run when we - already have the address to which we want to try to send a message. - The 'data' may come either off of the wire or we might have had it - already in our inventory when we tried to send a msg to this - particular address. - """ - try: - addressVersion, streamNumber, ripe = decodeAddress(address)[1:] - - readPosition = 20 # bypass the nonce, time, and object type - embeddedAddressVersion, varintLength = decodeVarint( - data[readPosition:readPosition + 10]) - readPosition += varintLength - embeddedStreamNumber, varintLength = decodeVarint( - data[readPosition:readPosition + 10]) - readPosition += varintLength - # We'll store the address version and stream number - # (and some more) in the pubkeys table. - storedData = data[20:readPosition] - - if addressVersion != embeddedAddressVersion: - logger.info( - 'Pubkey decryption was UNsuccessful' - ' due to address version mismatch.') - return 'failed' - if streamNumber != embeddedStreamNumber: - logger.info( - 'Pubkey decryption was UNsuccessful' - ' due to stream number mismatch.') - return 'failed' - - tag = data[readPosition:readPosition + 32] - readPosition += 32 - # the time through the tag. More data is appended onto - # signedData below after the decryption. 
- signedData = data[8:readPosition] - encryptedData = data[readPosition:] - - # Let us try to decrypt the pubkey - toAddress, cryptorObject = state.neededPubkeys[tag] - if toAddress != address: - logger.critical( - 'decryptAndCheckPubkeyPayload failed due to toAddress' - ' mismatch. This is very peculiar.' - ' toAddress: %s, address %s', - toAddress, address - ) - # the only way I can think that this could happen - # is if someone encodes their address data two different ways. - # That sort of address-malleability should have been caught - # by the UI or API and an error given to the user. - return 'failed' - try: - decryptedData = cryptorObject.decrypt(encryptedData) - except: # noqa:E722 - # FIXME: use a proper exception after `pyelliptic.ecc` is refactored. - # Someone must have encrypted some data with a different key - # but tagged it with a tag for which we are watching. - logger.info('Pubkey decryption was unsuccessful.') - return 'failed' - - readPosition = 0 - # bitfieldBehaviors = decryptedData[readPosition:readPosition + 4] - readPosition += 4 - publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64] - readPosition += 64 - publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64] - readPosition += 64 - specifiedNonceTrialsPerByteLength = decodeVarint( - decryptedData[readPosition:readPosition + 10])[1] - readPosition += specifiedNonceTrialsPerByteLength - specifiedPayloadLengthExtraBytesLength = decodeVarint( - decryptedData[readPosition:readPosition + 10])[1] - readPosition += specifiedPayloadLengthExtraBytesLength - storedData += decryptedData[:readPosition] - signedData += decryptedData[:readPosition] - signatureLength, signatureLengthLength = decodeVarint( - decryptedData[readPosition:readPosition + 10]) - readPosition += signatureLengthLength - signature = decryptedData[readPosition:readPosition + signatureLength] - - if not highlevelcrypto.verify( - signedData, signature, hexlify(publicSigningKey)): - 
logger.info( - 'ECDSA verify failed (within decryptAndCheckPubkeyPayload)') - return 'failed' - - logger.info( - 'ECDSA verify passed (within decryptAndCheckPubkeyPayload)') - - sha = hashlib.new('sha512') - sha.update(publicSigningKey + publicEncryptionKey) - embeddedRipe = RIPEMD160Hash(sha.digest()).digest() - - if embeddedRipe != ripe: - # Although this pubkey object had the tag were were looking for - # and was encrypted with the correct encryption key, - # it doesn't contain the correct pubkeys. Someone is - # either being malicious or using buggy software. - logger.info( - 'Pubkey decryption was UNsuccessful due to RIPE mismatch.') - return 'failed' - - # Everything checked out. Insert it into the pubkeys table. - - logger.info( - 'within decryptAndCheckPubkeyPayload, ' - 'addressVersion: %s, streamNumber: %s\nripe %s\n' - 'publicSigningKey in hex: %s\npublicEncryptionKey in hex: %s', - addressVersion, streamNumber, hexlify(ripe), - hexlify(publicSigningKey), hexlify(publicEncryptionKey) - ) - - t = (address, addressVersion, storedData, int(time.time()), 'yes') - sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t) - return 'successful' - except varintDecodeError: - logger.info( - 'Pubkey decryption was UNsuccessful due to a malformed varint.') - return 'failed' - except Exception: - logger.critical( - 'Pubkey decryption was UNsuccessful because of' - ' an unhandled exception! 
This is definitely a bug!', - exc_info=True - ) - return 'failed' diff --git a/src/tests/mock/pybitmessage/pybitmessage b/src/tests/mock/pybitmessage/pybitmessage deleted file mode 100644 index decebfff..00000000 --- a/src/tests/mock/pybitmessage/pybitmessage +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python2.7 - -import os -import pkg_resources - -dist = pkg_resources.get_distribution('pybitmessage') -script_file = os.path.join(dist.location, dist.key, 'bitmessagemain.py') -new_globals = globals() -new_globals.update(__file__=script_file) - -execfile(script_file, new_globals) diff --git a/src/tests/mock/pybitmessage/pyelliptic/__init__.py b/src/tests/mock/pybitmessage/pyelliptic/__init__.py deleted file mode 100644 index cafa89c9..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Copyright (C) 2010 -Author: Yann GUIBET -Contact: - -Python OpenSSL wrapper. -For modern cryptography with ECC, AES, HMAC, Blowfish, ... - -This is an abandoned package maintained inside of the PyBitmessage. 
-""" - -from .cipher import Cipher -from .ecc import ECC -from .eccblind import ECCBlind -from .eccblindchain import ECCBlindChain -from .hash import hmac_sha256, hmac_sha512, pbkdf2 -from .openssl import OpenSSL - -__version__ = '1.3' - -__all__ = [ - 'OpenSSL', - 'ECC', - 'ECCBlind', - 'ECCBlindChain', - 'Cipher', - 'hmac_sha256', - 'hmac_sha512', - 'pbkdf2' -] diff --git a/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py b/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py deleted file mode 100644 index 23c24b5e..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/arithmetic.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -Arithmetic Expressions -""" -import hashlib -import re - -P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1 -A = 0 -Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 -Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 -G = (Gx, Gy) - - -def inv(a, n): - """Inversion""" - lm, hm = 1, 0 - low, high = a % n, n - while low > 1: - r = high // low - nm, new = hm - lm * r, high - low * r - lm, low, hm, high = nm, new, lm, low - return lm % n - - -def get_code_string(base): - """Returns string according to base value""" - if base == 2: - return b'01' - if base == 10: - return b'0123456789' - if base == 16: - return b'0123456789abcdef' - if base == 58: - return b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' - if base == 256: - try: - return b''.join([chr(x) for x in range(256)]) - except TypeError: - return bytes([x for x in range(256)]) - - raise ValueError("Invalid base!") - - -def encode(val, base, minlen=0): - """Returns the encoded string""" - code_string = get_code_string(base) - result = b'' - while val > 0: - val, i = divmod(val, base) - result = code_string[i:i + 1] + result - if len(result) < minlen: - result = code_string[0:1] * (minlen - len(result)) + result - return result - - -def decode(string, base): - """Returns the decoded string""" - code_string 
= get_code_string(base) - result = 0 - if base == 16: - string = string.lower() - while string: - result *= base - result += code_string.find(string[0]) - string = string[1:] - return result - - -def changebase(string, frm, to, minlen=0): - """Change base of the string""" - return encode(decode(string, frm), to, minlen) - - -def base10_add(a, b): - """Adding the numbers that are of base10""" - # pylint: disable=too-many-function-args - if a is None: - return b[0], b[1] - if b is None: - return a[0], a[1] - if a[0] == b[0]: - if a[1] == b[1]: - return base10_double(a[0], a[1]) - return None - m = ((b[1] - a[1]) * inv(b[0] - a[0], P)) % P - x = (m * m - a[0] - b[0]) % P - y = (m * (a[0] - x) - a[1]) % P - return (x, y) - - -def base10_double(a): - """Double the numbers that are of base10""" - if a is None: - return None - m = ((3 * a[0] * a[0] + A) * inv(2 * a[1], P)) % P - x = (m * m - 2 * a[0]) % P - y = (m * (a[0] - x) - a[1]) % P - return (x, y) - - -def base10_multiply(a, n): - """Multiply the numbers that are of base10""" - if n == 0: - return G - if n == 1: - return a - n, m = divmod(n, 2) - if m == 0: - return base10_double(base10_multiply(a, n)) - if m == 1: - return base10_add(base10_double(base10_multiply(a, n)), a) - return None - - -def hex_to_point(h): - """Converting hexadecimal to point value""" - return (decode(h[2:66], 16), decode(h[66:], 16)) - - -def point_to_hex(p): - """Converting point value to hexadecimal""" - return b'04' + encode(p[0], 16, 64) + encode(p[1], 16, 64) - - -def multiply(privkey, pubkey): - """Multiplying keys""" - return point_to_hex(base10_multiply( - hex_to_point(pubkey), decode(privkey, 16))) - - -def privtopub(privkey): - """Converting key from private to public""" - return point_to_hex(base10_multiply(G, decode(privkey, 16))) - - -def add(p1, p2): - """Adding two public keys""" - if len(p1) == 32: - return encode(decode(p1, 16) + decode(p2, 16) % P, 16, 32) - return point_to_hex(base10_add(hex_to_point(p1), 
hex_to_point(p2))) - - -def hash_160(string): - """Hashed version of public key""" - intermed = hashlib.sha256(string).digest() - ripemd160 = hashlib.new('ripemd160') - ripemd160.update(intermed) - return ripemd160.digest() - - -def dbl_sha256(string): - """Double hashing (SHA256)""" - return hashlib.sha256(hashlib.sha256(string).digest()).digest() - - -def bin_to_b58check(inp): - """Convert binary to base58""" - inp_fmtd = '\x00' + inp - leadingzbytes = len(re.match('^\x00*', inp_fmtd).group(0)) - checksum = dbl_sha256(inp_fmtd)[:4] - return '1' * leadingzbytes + changebase(inp_fmtd + checksum, 256, 58) - - -def pubkey_to_address(pubkey): - """Convert a public key (in hex) to a Bitcoin address""" - return bin_to_b58check(hash_160(changebase(pubkey, 16, 256))) diff --git a/src/tests/mock/pybitmessage/pyelliptic/cipher.py b/src/tests/mock/pybitmessage/pyelliptic/cipher.py deleted file mode 100644 index af6c08ca..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/cipher.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -Symmetric Encryption -""" -# Copyright (C) 2011 Yann GUIBET -# See LICENSE for details. 
- -from .openssl import OpenSSL - - -# pylint: disable=redefined-builtin -class Cipher(object): - """ - Main class for encryption - - import pyelliptic - iv = pyelliptic.Cipher.gen_IV('aes-256-cfb') - ctx = pyelliptic.Cipher("secretkey", iv, 1, ciphername='aes-256-cfb') - ciphertext = ctx.update('test1') - ciphertext += ctx.update('test2') - ciphertext += ctx.final() - - ctx2 = pyelliptic.Cipher("secretkey", iv, 0, ciphername='aes-256-cfb') - print ctx2.ciphering(ciphertext) - """ - def __init__(self, key, iv, do, ciphername='aes-256-cbc'): - """ - do == 1 => Encrypt; do == 0 => Decrypt - """ - self.cipher = OpenSSL.get_cipher(ciphername) - self.ctx = OpenSSL.EVP_CIPHER_CTX_new() - if do == 1 or do == 0: - k = OpenSSL.malloc(key, len(key)) - IV = OpenSSL.malloc(iv, len(iv)) - OpenSSL.EVP_CipherInit_ex( - self.ctx, self.cipher.get_pointer(), 0, k, IV, do) - else: - raise Exception("RTFM ...") - - @staticmethod - def get_all_cipher(): - """ - static method, returns all ciphers available - """ - return OpenSSL.cipher_algo.keys() - - @staticmethod - def get_blocksize(ciphername): - """This Method returns cipher blocksize""" - cipher = OpenSSL.get_cipher(ciphername) - return cipher.get_blocksize() - - @staticmethod - def gen_IV(ciphername): - """Generate random initialization vector""" - cipher = OpenSSL.get_cipher(ciphername) - return OpenSSL.rand(cipher.get_blocksize()) - - def update(self, input): - """Update result with more data""" - i = OpenSSL.c_int(0) - buffer = OpenSSL.malloc(b"", len(input) + self.cipher.get_blocksize()) - inp = OpenSSL.malloc(input, len(input)) - if OpenSSL.EVP_CipherUpdate(self.ctx, OpenSSL.byref(buffer), - OpenSSL.byref(i), inp, len(input)) == 0: - raise Exception("[OpenSSL] EVP_CipherUpdate FAIL ...") - return buffer.raw[0:i.value] # pylint: disable=invalid-slice-index - - def final(self): - """Returning the final value""" - i = OpenSSL.c_int(0) - buffer = OpenSSL.malloc(b"", self.cipher.get_blocksize()) - if 
(OpenSSL.EVP_CipherFinal_ex(self.ctx, OpenSSL.byref(buffer), - OpenSSL.byref(i))) == 0: - raise Exception("[OpenSSL] EVP_CipherFinal_ex FAIL ...") - return buffer.raw[0:i.value] # pylint: disable=invalid-slice-index - - def ciphering(self, input): - """ - Do update and final in one method - """ - buff = self.update(input) - return buff + self.final() - - def __del__(self): - # pylint: disable=protected-access - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - OpenSSL.EVP_CIPHER_CTX_reset(self.ctx) - else: - OpenSSL.EVP_CIPHER_CTX_cleanup(self.ctx) - OpenSSL.EVP_CIPHER_CTX_free(self.ctx) diff --git a/src/tests/mock/pybitmessage/pyelliptic/ecc.py b/src/tests/mock/pybitmessage/pyelliptic/ecc.py deleted file mode 100644 index 388227c7..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/ecc.py +++ /dev/null @@ -1,501 +0,0 @@ -""" -Asymmetric cryptography using elliptic curves -""" -# pylint: disable=protected-access, too-many-branches, too-many-locals -# Copyright (C) 2011 Yann GUIBET -# See LICENSE for details. - -from hashlib import sha512 -from struct import pack, unpack - -from .cipher import Cipher -from .hash import equals, hmac_sha256 -from .openssl import OpenSSL - - -class ECC(object): - """ - Asymmetric encryption with Elliptic Curve Cryptography (ECC) - ECDH, ECDSA and ECIES - - >>> import pyelliptic - - >>> alice = pyelliptic.ECC() # default curve: sect283r1 - >>> bob = pyelliptic.ECC(curve='sect571r1') - - >>> ciphertext = alice.encrypt("Hello Bob", bob.get_pubkey()) - >>> print bob.decrypt(ciphertext) - - >>> signature = bob.sign("Hello Alice") - >>> # alice's job : - >>> print pyelliptic.ECC( - >>> pubkey=bob.get_pubkey()).verify(signature, "Hello Alice") - - >>> # ERROR !!! 
- >>> try: - >>> key = alice.get_ecdh_key(bob.get_pubkey()) - >>> except: - >>> print("For ECDH key agreement, the keys must be defined on the same curve !") - - >>> alice = pyelliptic.ECC(curve='sect571r1') - >>> print alice.get_ecdh_key(bob.get_pubkey()).encode('hex') - >>> print bob.get_ecdh_key(alice.get_pubkey()).encode('hex') - - """ - - def __init__( - self, - pubkey=None, - privkey=None, - pubkey_x=None, - pubkey_y=None, - raw_privkey=None, - curve='sect283r1', - ): # pylint: disable=too-many-arguments - """ - For a normal and High level use, specifie pubkey, - privkey (if you need) and the curve - """ - if isinstance(curve, str): - self.curve = OpenSSL.get_curve(curve) - else: - self.curve = curve - - if pubkey_x is not None and pubkey_y is not None: - self._set_keys(pubkey_x, pubkey_y, raw_privkey) - elif pubkey is not None: - curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey) - if privkey is not None: - curve2, raw_privkey, _ = ECC._decode_privkey(privkey) - if curve != curve2: - raise Exception("Bad ECC keys ...") - self.curve = curve - self._set_keys(pubkey_x, pubkey_y, raw_privkey) - else: - self.privkey, self.pubkey_x, self.pubkey_y = self._generate() - - def _set_keys(self, pubkey_x, pubkey_y, privkey): - if self.raw_check_key(privkey, pubkey_x, pubkey_y) < 0: - self.pubkey_x = None - self.pubkey_y = None - self.privkey = None - raise Exception("Bad ECC keys ...") - else: - self.pubkey_x = pubkey_x - self.pubkey_y = pubkey_y - self.privkey = privkey - - @staticmethod - def get_curves(): - """ - static method, returns the list of all the curves available - """ - return OpenSSL.curves.keys() - - def get_curve(self): - """Encryption object from curve name""" - return OpenSSL.get_curve_by_id(self.curve) - - def get_curve_id(self): - """Currently used curve""" - return self.curve - - def get_pubkey(self): - """ - High level function which returns : - curve(2) + len_of_pubkeyX(2) + pubkeyX + len_of_pubkeyY + pubkeyY - """ - return b''.join(( - 
pack('!H', self.curve), - pack('!H', len(self.pubkey_x)), - self.pubkey_x, - pack('!H', len(self.pubkey_y)), - self.pubkey_y, - )) - - def get_privkey(self): - """ - High level function which returns - curve(2) + len_of_privkey(2) + privkey - """ - return b''.join(( - pack('!H', self.curve), - pack('!H', len(self.privkey)), - self.privkey, - )) - - @staticmethod - def _decode_pubkey(pubkey): - i = 0 - curve = unpack('!H', pubkey[i:i + 2])[0] - i += 2 - tmplen = unpack('!H', pubkey[i:i + 2])[0] - i += 2 - pubkey_x = pubkey[i:i + tmplen] - i += tmplen - tmplen = unpack('!H', pubkey[i:i + 2])[0] - i += 2 - pubkey_y = pubkey[i:i + tmplen] - i += tmplen - return curve, pubkey_x, pubkey_y, i - - @staticmethod - def _decode_privkey(privkey): - i = 0 - curve = unpack('!H', privkey[i:i + 2])[0] - i += 2 - tmplen = unpack('!H', privkey[i:i + 2])[0] - i += 2 - privkey = privkey[i:i + tmplen] - i += tmplen - return curve, privkey, i - - def _generate(self): - try: - pub_key_x = OpenSSL.BN_new() - pub_key_y = OpenSSL.BN_new() - - key = OpenSSL.EC_KEY_new_by_curve_name(self.curve) - if key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - if (OpenSSL.EC_KEY_generate_key(key)) == 0: - raise Exception("[OpenSSL] EC_KEY_generate_key FAIL ...") - if (OpenSSL.EC_KEY_check_key(key)) == 0: - raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...") - priv_key = OpenSSL.EC_KEY_get0_private_key(key) - - group = OpenSSL.EC_KEY_get0_group(key) - pub_key = OpenSSL.EC_KEY_get0_public_key(key) - - if OpenSSL.EC_POINT_get_affine_coordinates_GFp( - group, pub_key, pub_key_x, pub_key_y, 0) == 0: - raise Exception( - "[OpenSSL] EC_POINT_get_affine_coordinates_GFp FAIL ...") - - privkey = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(priv_key)) - pubkeyx = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_x)) - pubkeyy = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_y)) - OpenSSL.BN_bn2bin(priv_key, privkey) - privkey = privkey.raw - OpenSSL.BN_bn2bin(pub_key_x, pubkeyx) - pubkeyx = 
pubkeyx.raw - OpenSSL.BN_bn2bin(pub_key_y, pubkeyy) - pubkeyy = pubkeyy.raw - self.raw_check_key(privkey, pubkeyx, pubkeyy) - - return privkey, pubkeyx, pubkeyy - - finally: - OpenSSL.EC_KEY_free(key) - OpenSSL.BN_free(pub_key_x) - OpenSSL.BN_free(pub_key_y) - - def get_ecdh_key(self, pubkey): - """ - High level function. Compute public key with the local private key - and returns a 512bits shared key - """ - curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey) - if curve != self.curve: - raise Exception("ECC keys must be from the same curve !") - return sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest() - - def raw_get_ecdh_key(self, pubkey_x, pubkey_y): - """ECDH key as binary data""" - try: - ecdh_keybuffer = OpenSSL.malloc(0, 32) - - other_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve) - if other_key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - - other_pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0) - other_pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0) - - other_group = OpenSSL.EC_KEY_get0_group(other_key) - other_pub_key = OpenSSL.EC_POINT_new(other_group) - - if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(other_group, - other_pub_key, - other_pub_key_x, - other_pub_key_y, - 0)) == 0: - raise Exception( - "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...") - if (OpenSSL.EC_KEY_set_public_key(other_key, other_pub_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...") - if (OpenSSL.EC_KEY_check_key(other_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...") - - own_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve) - if own_key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - own_priv_key = OpenSSL.BN_bin2bn( - self.privkey, len(self.privkey), 0) - - if (OpenSSL.EC_KEY_set_private_key(own_key, own_priv_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...") - - if OpenSSL._hexversion > 0x10100000 and not 
OpenSSL._libreSSL: - OpenSSL.EC_KEY_set_method(own_key, OpenSSL.EC_KEY_OpenSSL()) - else: - OpenSSL.ECDH_set_method(own_key, OpenSSL.ECDH_OpenSSL()) - ecdh_keylen = OpenSSL.ECDH_compute_key( - ecdh_keybuffer, 32, other_pub_key, own_key, 0) - - if ecdh_keylen != 32: - raise Exception("[OpenSSL] ECDH keylen FAIL ...") - - return ecdh_keybuffer.raw - - finally: - OpenSSL.EC_KEY_free(other_key) - OpenSSL.BN_free(other_pub_key_x) - OpenSSL.BN_free(other_pub_key_y) - OpenSSL.EC_POINT_free(other_pub_key) - OpenSSL.EC_KEY_free(own_key) - OpenSSL.BN_free(own_priv_key) - - def check_key(self, privkey, pubkey): - """ - Check the public key and the private key. - The private key is optional (replace by None) - """ - curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey) - if privkey is None: - raw_privkey = None - curve2 = curve - else: - curve2, raw_privkey, _ = ECC._decode_privkey(privkey) - if curve != curve2: - raise Exception("Bad public and private key") - return self.raw_check_key(raw_privkey, pubkey_x, pubkey_y, curve) - - def raw_check_key(self, privkey, pubkey_x, pubkey_y, curve=None): - """Check key validity, key is supplied as binary data""" - if curve is None: - curve = self.curve - elif isinstance(curve, str): - curve = OpenSSL.get_curve(curve) - else: - curve = curve - try: - key = OpenSSL.EC_KEY_new_by_curve_name(curve) - if key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - if privkey is not None: - priv_key = OpenSSL.BN_bin2bn(privkey, len(privkey), 0) - pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0) - pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0) - - if privkey is not None: - if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0: - raise Exception( - "[OpenSSL] EC_KEY_set_private_key FAIL ...") - - group = OpenSSL.EC_KEY_get0_group(key) - pub_key = OpenSSL.EC_POINT_new(group) - - if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key, - pub_key_x, - pub_key_y, - 0)) == 0: - raise Exception( - 
"[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...") - if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...") - if (OpenSSL.EC_KEY_check_key(key)) == 0: - raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...") - return 0 - - finally: - OpenSSL.EC_KEY_free(key) - OpenSSL.BN_free(pub_key_x) - OpenSSL.BN_free(pub_key_y) - OpenSSL.EC_POINT_free(pub_key) - if privkey is not None: - OpenSSL.BN_free(priv_key) - - def sign(self, inputb, digest_alg=OpenSSL.digest_ecdsa_sha1): - """ - Sign the input with ECDSA method and returns the signature - """ - try: - size = len(inputb) - buff = OpenSSL.malloc(inputb, size) - digest = OpenSSL.malloc(0, 64) - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - md_ctx = OpenSSL.EVP_MD_CTX_new() - else: - md_ctx = OpenSSL.EVP_MD_CTX_create() - dgst_len = OpenSSL.pointer(OpenSSL.c_int(0)) - siglen = OpenSSL.pointer(OpenSSL.c_int(0)) - sig = OpenSSL.malloc(0, 151) - - key = OpenSSL.EC_KEY_new_by_curve_name(self.curve) - if key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - - priv_key = OpenSSL.BN_bin2bn(self.privkey, len(self.privkey), 0) - pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0) - pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0) - - if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...") - - group = OpenSSL.EC_KEY_get0_group(key) - pub_key = OpenSSL.EC_POINT_new(group) - - if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key, - pub_key_x, - pub_key_y, - 0)) == 0: - raise Exception( - "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...") - if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...") - if (OpenSSL.EC_KEY_check_key(key)) == 0: - raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...") - - if OpenSSL._hexversion > 0x10100000 and not 
OpenSSL._libreSSL: - OpenSSL.EVP_MD_CTX_new(md_ctx) - else: - OpenSSL.EVP_MD_CTX_init(md_ctx) - OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None) - - if (OpenSSL.EVP_DigestUpdate(md_ctx, buff, size)) == 0: - raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...") - OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len) - OpenSSL.ECDSA_sign(0, digest, dgst_len.contents, sig, siglen, key) - if (OpenSSL.ECDSA_verify(0, digest, dgst_len.contents, sig, - siglen.contents, key)) != 1: - raise Exception("[OpenSSL] ECDSA_verify FAIL ...") - - return sig.raw[:siglen.contents.value] - - finally: - OpenSSL.EC_KEY_free(key) - OpenSSL.BN_free(pub_key_x) - OpenSSL.BN_free(pub_key_y) - OpenSSL.BN_free(priv_key) - OpenSSL.EC_POINT_free(pub_key) - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - OpenSSL.EVP_MD_CTX_free(md_ctx) - else: - OpenSSL.EVP_MD_CTX_destroy(md_ctx) - - def verify(self, sig, inputb, digest_alg=OpenSSL.digest_ecdsa_sha1): - """ - Verify the signature with the input and the local public key. 
- Returns a boolean - """ - try: - bsig = OpenSSL.malloc(sig, len(sig)) - binputb = OpenSSL.malloc(inputb, len(inputb)) - digest = OpenSSL.malloc(0, 64) - dgst_len = OpenSSL.pointer(OpenSSL.c_int(0)) - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - md_ctx = OpenSSL.EVP_MD_CTX_new() - else: - md_ctx = OpenSSL.EVP_MD_CTX_create() - key = OpenSSL.EC_KEY_new_by_curve_name(self.curve) - - if key == 0: - raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...") - - pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0) - pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0) - group = OpenSSL.EC_KEY_get0_group(key) - pub_key = OpenSSL.EC_POINT_new(group) - - if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key, - pub_key_x, - pub_key_y, - 0)) == 0: - raise Exception( - "[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...") - if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0: - raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...") - if (OpenSSL.EC_KEY_check_key(key)) == 0: - raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...") - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - OpenSSL.EVP_MD_CTX_new(md_ctx) - else: - OpenSSL.EVP_MD_CTX_init(md_ctx) - OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None) - if (OpenSSL.EVP_DigestUpdate(md_ctx, binputb, len(inputb))) == 0: - raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...") - - OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len) - ret = OpenSSL.ECDSA_verify( - 0, digest, dgst_len.contents, bsig, len(sig), key) - - if ret == -1: - # Fail to Check - return False - if ret == 0: - # Bad signature ! 
- return False - # Good - return True - - finally: - OpenSSL.EC_KEY_free(key) - OpenSSL.BN_free(pub_key_x) - OpenSSL.BN_free(pub_key_y) - OpenSSL.EC_POINT_free(pub_key) - if OpenSSL._hexversion > 0x10100000 and not OpenSSL._libreSSL: - OpenSSL.EVP_MD_CTX_free(md_ctx) - else: - OpenSSL.EVP_MD_CTX_destroy(md_ctx) - - @staticmethod - def encrypt(data, pubkey, ephemcurve=None, ciphername='aes-256-cbc'): - """ - Encrypt data with ECIES method using the public key of the recipient. - """ - curve, pubkey_x, pubkey_y, _ = ECC._decode_pubkey(pubkey) - return ECC.raw_encrypt(data, pubkey_x, pubkey_y, curve=curve, - ephemcurve=ephemcurve, ciphername=ciphername) - - @staticmethod - def raw_encrypt( - data, - pubkey_x, - pubkey_y, - curve='sect283r1', - ephemcurve=None, - ciphername='aes-256-cbc', - ): # pylint: disable=too-many-arguments - """ECHD encryption, keys supplied in binary data format""" - - if ephemcurve is None: - ephemcurve = curve - ephem = ECC(curve=ephemcurve) - key = sha512(ephem.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest() - key_e, key_m = key[:32], key[32:] - pubkey = ephem.get_pubkey() - iv = OpenSSL.rand(OpenSSL.get_cipher(ciphername).get_blocksize()) - ctx = Cipher(key_e, iv, 1, ciphername) - ciphertext = iv + pubkey + ctx.ciphering(data) - mac = hmac_sha256(key_m, ciphertext) - return ciphertext + mac - - def decrypt(self, data, ciphername='aes-256-cbc'): - """ - Decrypt data with ECIES method using the local private key - """ - blocksize = OpenSSL.get_cipher(ciphername).get_blocksize() - iv = data[:blocksize] - i = blocksize - _, pubkey_x, pubkey_y, i2 = ECC._decode_pubkey(data[i:]) - i += i2 - ciphertext = data[i:len(data) - 32] - i += len(ciphertext) - mac = data[i:] - key = sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest() - key_e, key_m = key[:32], key[32:] - if not equals(hmac_sha256(key_m, data[:len(data) - 32]), mac): - raise RuntimeError("Fail to verify data") - ctx = Cipher(key_e, iv, 0, ciphername) - return ctx.ciphering(ciphertext) 
diff --git a/src/tests/mock/pybitmessage/pyelliptic/eccblind.py b/src/tests/mock/pybitmessage/pyelliptic/eccblind.py deleted file mode 100644 index 83bc7632..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/eccblind.py +++ /dev/null @@ -1,373 +0,0 @@ -""" -ECC blind signature functionality based on -"An Efficient Blind Signature Scheme -Based on the Elliptic CurveDiscrete Logarithm Problem" by Morteza Nikooghadama - and Ali Zakerolhosseini , -http://www.isecure-journal.com/article_39171_47f9ec605dd3918c2793565ec21fcd7a.pdf -""" - -# variable names are based on the math in the paper, so they don't conform -# to PEP8 - -import time -from hashlib import sha256 -from struct import pack, unpack - -from .openssl import OpenSSL - -# first byte in serialisation can contain data -Y_BIT = 0x01 -COMPRESSED_BIT = 0x02 - -# formats -BIGNUM = '!32s' -EC = '!B32s' -PUBKEY = '!BB33s' - - -class Expiration(object): - """Expiration of pubkey""" - @staticmethod - def deserialize(val): - """Create an object out of int""" - year = ((val & 0xF0) >> 4) + 2020 - month = val & 0x0F - assert month < 12 - return Expiration(year, month) - - def __init__(self, year, month): - assert isinstance(year, int) - assert year > 2019 and year < 2036 - assert isinstance(month, int) - assert month < 12 - self.year = year - self.month = month - self.exp = year + month / 12.0 - - def serialize(self): - """Make int out of object""" - return ((self.year - 2020) << 4) + self.month - - def verify(self): - """Check if the pubkey has expired""" - now = time.gmtime() - return self.exp >= now.tm_year + (now.tm_mon - 1) / 12.0 - - -class Value(object): - """Value of a pubkey""" - @staticmethod - def deserialize(val): - """Make object out of int""" - return Value(val) - - def __init__(self, value=0xFF): - assert isinstance(value, int) - self.value = value - - def serialize(self): - """Make int out of object""" - return self.value & 0xFF - - def verify(self, value): - """Verify against supplied value""" - return 
value <= self.value - - -class ECCBlind(object): # pylint: disable=too-many-instance-attributes - """ - Class for ECC blind signature functionality - """ - - # init - k = None - R = None - F = None - d = None - Q = None - a = None - b = None - c = None - binv = None - r = None - m = None - m_ = None - s_ = None - signature = None - exp = None - val = None - - def ec_get_random(self): - """ - Random integer within the EC order - """ - randomnum = OpenSSL.BN_new() - OpenSSL.BN_rand(randomnum, OpenSSL.BN_num_bits(self.n), 0, 0) - return randomnum - - def ec_invert(self, a): - """ - ECC inversion - """ - inverse = OpenSSL.BN_mod_inverse(0, a, self.n, self.ctx) - return inverse - - def ec_gen_keypair(self): - """ - Generate an ECC keypair - We're using compressed keys - """ - d = self.ec_get_random() - Q = OpenSSL.EC_POINT_new(self.group) - OpenSSL.EC_POINT_mul(self.group, Q, d, 0, 0, 0) - return (d, Q) - - def ec_Ftor(self, F): - """ - x0 coordinate of F - """ - # F = (x0, y0) - x0 = OpenSSL.BN_new() - y0 = OpenSSL.BN_new() - OpenSSL.EC_POINT_get_affine_coordinates(self.group, F, x0, y0, self.ctx) - OpenSSL.BN_free(y0) - return x0 - - def _ec_point_serialize(self, point): - """Make an EC point into a string""" - try: - x = OpenSSL.BN_new() - y = OpenSSL.BN_new() - OpenSSL.EC_POINT_get_affine_coordinates( - self.group, point, x, y, 0) - y_byte = (OpenSSL.BN_is_odd(y) & Y_BIT) | COMPRESSED_BIT - l_ = OpenSSL.BN_num_bytes(self.n) - try: - bx = OpenSSL.malloc(0, l_) - OpenSSL.BN_bn2binpad(x, bx, l_) - out = bx.raw - except AttributeError: - # padding manually - bx = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(x)) - OpenSSL.BN_bn2bin(x, bx) - out = bx.raw.rjust(l_, b'\x00') - return pack(EC, y_byte, out) - - finally: - OpenSSL.BN_clear_free(x) - OpenSSL.BN_clear_free(y) - - def _ec_point_deserialize(self, data): - """Make a string into an EC point""" - y_bit, x_raw = unpack(EC, data) - x = OpenSSL.BN_bin2bn(x_raw, OpenSSL.BN_num_bytes(self.n), 0) - y_bit &= Y_BIT - retval = 
OpenSSL.EC_POINT_new(self.group) - OpenSSL.EC_POINT_set_compressed_coordinates(self.group, - retval, - x, - y_bit, - self.ctx) - return retval - - def _bn_serialize(self, bn): - """Make a string out of BigNum""" - l_ = OpenSSL.BN_num_bytes(self.n) - try: - o = OpenSSL.malloc(0, l_) - OpenSSL.BN_bn2binpad(bn, o, l_) - return o.raw - except AttributeError: - o = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(bn)) - OpenSSL.BN_bn2bin(bn, o) - return o.raw.rjust(l_, b'\x00') - - def _bn_deserialize(self, data): - """Make a BigNum out of string""" - x = OpenSSL.BN_bin2bn(data, OpenSSL.BN_num_bytes(self.n), 0) - return x - - def _init_privkey(self, privkey): - """Initialise private key out of string/bytes""" - self.d = self._bn_deserialize(privkey) - - def privkey(self): - """Make a private key into a string""" - return pack(BIGNUM, self.d) - - def _init_pubkey(self, pubkey): - """Initialise pubkey out of string/bytes""" - unpacked = unpack(PUBKEY, pubkey) - self.expiration = Expiration.deserialize(unpacked[0]) - self.value = Value.deserialize(unpacked[1]) - self.Q = self._ec_point_deserialize(unpacked[2]) - - def pubkey(self): - """Make a pubkey into a string""" - return pack(PUBKEY, self.expiration.serialize(), - self.value.serialize(), - self._ec_point_serialize(self.Q)) - - def __init__(self, curve="secp256k1", pubkey=None, privkey=None, # pylint: disable=too-many-arguments - year=2025, month=11, value=0xFF): - self.ctx = OpenSSL.BN_CTX_new() - - # ECC group - self.group = OpenSSL.EC_GROUP_new_by_curve_name( - OpenSSL.get_curve(curve)) - - # Order n - self.n = OpenSSL.BN_new() - OpenSSL.EC_GROUP_get_order(self.group, self.n, self.ctx) - - # Generator G - self.G = OpenSSL.EC_GROUP_get0_generator(self.group) - - # Identity O (infinity) - self.iO = OpenSSL.EC_POINT_new(self.group) - OpenSSL.EC_POINT_set_to_infinity(self.group, self.iO) - - if privkey: - assert pubkey - # load both pubkey and privkey from bytes - self._init_privkey(privkey) - self._init_pubkey(pubkey) - elif 
pubkey: - # load pubkey from bytes - self._init_pubkey(pubkey) - else: - # new keypair - self.d, self.Q = self.ec_gen_keypair() - if not year or not month: - now = time.gmtime() - if now.tm_mon == 12: - self.expiration = Expiration(now.tm_year + 1, 1) - else: - self.expiration = Expiration(now.tm_year, now.tm_mon + 1) - else: - self.expiration = Expiration(year, month) - self.value = Value(value) - - def __del__(self): - OpenSSL.BN_free(self.n) - OpenSSL.BN_CTX_free(self.ctx) - - def signer_init(self): - """ - Init signer - """ - # Signer: Random integer k - self.k = self.ec_get_random() - - # R = kG - self.R = OpenSSL.EC_POINT_new(self.group) - OpenSSL.EC_POINT_mul(self.group, self.R, self.k, 0, 0, 0) - - return self._ec_point_serialize(self.R) - - def create_signing_request(self, R, msg): - """ - Requester creates a new signing request - """ - self.R = self._ec_point_deserialize(R) - msghash = sha256(msg).digest() - - # Requester: 3 random blinding factors - self.F = OpenSSL.EC_POINT_new(self.group) - OpenSSL.EC_POINT_set_to_infinity(self.group, self.F) - temp = OpenSSL.EC_POINT_new(self.group) - abinv = OpenSSL.BN_new() - - # F != O - while OpenSSL.EC_POINT_cmp(self.group, self.F, self.iO, self.ctx) == 0: - self.a = self.ec_get_random() - self.b = self.ec_get_random() - self.c = self.ec_get_random() - - # F = b^-1 * R... - self.binv = self.ec_invert(self.b) - OpenSSL.EC_POINT_mul(self.group, temp, 0, self.R, self.binv, 0) - OpenSSL.EC_POINT_copy(self.F, temp) - - # ... + a*b^-1 * Q... - OpenSSL.BN_mul(abinv, self.a, self.binv, self.ctx) - OpenSSL.EC_POINT_mul(self.group, temp, 0, self.Q, abinv, 0) - OpenSSL.EC_POINT_add(self.group, self.F, self.F, temp, 0) - - # ... 
+ c*G - OpenSSL.EC_POINT_mul(self.group, temp, 0, self.G, self.c, 0) - OpenSSL.EC_POINT_add(self.group, self.F, self.F, temp, 0) - - # F = (x0, y0) - self.r = self.ec_Ftor(self.F) - - # Requester: Blinding (m' = br(m) + a) - self.m = OpenSSL.BN_new() - OpenSSL.BN_bin2bn(msghash, len(msghash), self.m) - - self.m_ = OpenSSL.BN_new() - OpenSSL.BN_mod_mul(self.m_, self.b, self.r, self.n, self.ctx) - OpenSSL.BN_mod_mul(self.m_, self.m_, self.m, self.n, self.ctx) - OpenSSL.BN_mod_add(self.m_, self.m_, self.a, self.n, self.ctx) - return self._bn_serialize(self.m_) - - def blind_sign(self, m_): - """ - Signer blind-signs the request - """ - self.m_ = self._bn_deserialize(m_) - self.s_ = OpenSSL.BN_new() - OpenSSL.BN_mod_mul(self.s_, self.d, self.m_, self.n, self.ctx) - OpenSSL.BN_mod_add(self.s_, self.s_, self.k, self.n, self.ctx) - OpenSSL.BN_free(self.k) - return self._bn_serialize(self.s_) - - def unblind(self, s_): - """ - Requester unblinds the signature - """ - self.s_ = self._bn_deserialize(s_) - s = OpenSSL.BN_new() - OpenSSL.BN_mod_mul(s, self.binv, self.s_, self.n, self.ctx) - OpenSSL.BN_mod_add(s, s, self.c, self.n, self.ctx) - OpenSSL.BN_free(self.a) - OpenSSL.BN_free(self.b) - OpenSSL.BN_free(self.c) - self.signature = (s, self.F) - return self._bn_serialize(s) + self._ec_point_serialize(self.F) - - def verify(self, msg, signature, value=1): - """ - Verify signature with certifier's pubkey - """ - - # convert msg to BIGNUM - self.m = OpenSSL.BN_new() - msghash = sha256(msg).digest() - OpenSSL.BN_bin2bn(msghash, len(msghash), self.m) - - # init - s, self.F = (self._bn_deserialize(signature[0:32]), - self._ec_point_deserialize(signature[32:])) - if self.r is None: - self.r = self.ec_Ftor(self.F) - - lhs = OpenSSL.EC_POINT_new(self.group) - rhs = OpenSSL.EC_POINT_new(self.group) - - OpenSSL.EC_POINT_mul(self.group, lhs, s, 0, 0, 0) - - OpenSSL.EC_POINT_mul(self.group, rhs, 0, self.Q, self.m, 0) - OpenSSL.EC_POINT_mul(self.group, rhs, 0, rhs, self.r, 0) - 
OpenSSL.EC_POINT_add(self.group, rhs, rhs, self.F, self.ctx) - - retval = OpenSSL.EC_POINT_cmp(self.group, lhs, rhs, self.ctx) - if retval == -1: - raise RuntimeError("EC_POINT_cmp returned an error") - elif not self.value.verify(value): - return False - elif not self.expiration.verify(): - return False - elif retval != 0: - return False - return True diff --git a/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py b/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py deleted file mode 100644 index 56e8ce2a..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/eccblindchain.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Blind signature chain with a top level CA -""" - -from .eccblind import ECCBlind - - -class ECCBlindChain(object): # pylint: disable=too-few-public-methods - """ - # Class for ECC Blind Chain signature functionality - """ - - def __init__(self, ca=None, chain=None): - self.chain = [] - self.ca = [] - if ca: - for i in range(0, len(ca), 35): - self.ca.append(ca[i:i + 35]) - if chain: - self.chain.append(chain[0:35]) - for i in range(35, len(chain), 100): - if len(chain[i:]) == 65: - self.chain.append(chain[i:i + 65]) - else: - self.chain.append(chain[i:i + 100]) - - def verify(self, msg, value): - """Verify a chain provides supplied message and value""" - parent = None - l_ = 0 - for level in self.chain: - l_ += 1 - pubkey = None - signature = None - if len(level) == 100: - pubkey, signature = (level[0:35], level[35:]) - elif len(level) == 35: - if level not in self.ca: - return False - parent = level - continue - else: - signature = level - verifier_obj = ECCBlind(pubkey=parent) - if pubkey: - if not verifier_obj.verify(pubkey, signature, value): - return False - parent = pubkey - else: - return verifier_obj.verify(msg=msg, signature=signature, - value=value) - return None diff --git a/src/tests/mock/pybitmessage/pyelliptic/hash.py b/src/tests/mock/pybitmessage/pyelliptic/hash.py deleted file mode 100644 index 70c9a6ce..00000000 --- 
a/src/tests/mock/pybitmessage/pyelliptic/hash.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -Wrappers for hash functions from OpenSSL. -""" -# Copyright (C) 2011 Yann GUIBET -# See LICENSE for details. - -from .openssl import OpenSSL - - -# For python3 -def _equals_bytes(a, b): - if len(a) != len(b): - return False - result = 0 - for x, y in zip(a, b): - result |= x ^ y - return result == 0 - - -def _equals_str(a, b): - if len(a) != len(b): - return False - result = 0 - for x, y in zip(a, b): - result |= ord(x) ^ ord(y) - return result == 0 - - -def equals(a, b): - """Compare two strings or bytearrays""" - if isinstance(a, str): - return _equals_str(a, b) - return _equals_bytes(a, b) - - -def hmac_sha256(k, m): - """ - Compute the key and the message with HMAC SHA5256 - """ - key = OpenSSL.malloc(k, len(k)) - d = OpenSSL.malloc(m, len(m)) - md = OpenSSL.malloc(0, 32) - i = OpenSSL.pointer(OpenSSL.c_int(0)) - OpenSSL.HMAC(OpenSSL.EVP_sha256(), key, len(k), d, len(m), md, i) - return md.raw - - -def hmac_sha512(k, m): - """ - Compute the key and the message with HMAC SHA512 - """ - key = OpenSSL.malloc(k, len(k)) - d = OpenSSL.malloc(m, len(m)) - md = OpenSSL.malloc(0, 64) - i = OpenSSL.pointer(OpenSSL.c_int(0)) - OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i) - return md.raw - - -def pbkdf2(password, salt=None, i=10000, keylen=64): - """Key derivation function using SHA256""" - if salt is None: - salt = OpenSSL.rand(8) - p_password = OpenSSL.malloc(password, len(password)) - p_salt = OpenSSL.malloc(salt, len(salt)) - output = OpenSSL.malloc(0, keylen) - OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt, - len(p_salt), i, OpenSSL.EVP_sha256(), - keylen, output) - return salt, output.raw diff --git a/src/tests/mock/pybitmessage/pyelliptic/openssl.py b/src/tests/mock/pybitmessage/pyelliptic/openssl.py deleted file mode 100644 index abc6ac13..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/openssl.py +++ /dev/null @@ -1,803 +0,0 @@ -# 
Copyright (C) 2011 Yann GUIBET -# See LICENSE for details. -# -# Software slightly changed by Jonathan Warren -""" -This module loads openssl libs with ctypes and incapsulates -needed openssl functionality in class _OpenSSL. -""" -import ctypes -import sys - -# pylint: disable=protected-access - -OpenSSL = None - - -class CipherName(object): - """Class returns cipher name, pointer and blocksize""" - - def __init__(self, name, pointer, blocksize): - self._name = name - self._pointer = pointer - self._blocksize = blocksize - - def __str__(self): - return "Cipher : " + self._name + \ - " | Blocksize : " + str(self._blocksize) + \ - " | Function pointer : " + str(self._pointer) - - def get_pointer(self): - """This method returns cipher pointer""" - return self._pointer() - - def get_name(self): - """This method returns cipher name""" - return self._name - - def get_blocksize(self): - """This method returns cipher blocksize""" - return self._blocksize - - -def get_version(library): - """This function return version, hexversion and cflages""" - version = None - hexversion = None - cflags = None - try: - # OpenSSL 1.1 - OPENSSL_VERSION = 0 - OPENSSL_CFLAGS = 1 - library.OpenSSL_version.argtypes = [ctypes.c_int] - library.OpenSSL_version.restype = ctypes.c_char_p - version = library.OpenSSL_version(OPENSSL_VERSION) - cflags = library.OpenSSL_version(OPENSSL_CFLAGS) - library.OpenSSL_version_num.restype = ctypes.c_long - hexversion = library.OpenSSL_version_num() - except AttributeError: - try: - # OpenSSL 1.0 - SSLEAY_VERSION = 0 - SSLEAY_CFLAGS = 2 - library.SSLeay.restype = ctypes.c_long - library.SSLeay_version.restype = ctypes.c_char_p - library.SSLeay_version.argtypes = [ctypes.c_int] - version = library.SSLeay_version(SSLEAY_VERSION) - cflags = library.SSLeay_version(SSLEAY_CFLAGS) - hexversion = library.SSLeay() - except AttributeError: - # raise NotImplementedError('Cannot determine version of this OpenSSL library.') - pass - return (version, hexversion, cflags) - 
- -class _OpenSSL(object): - """ - Wrapper for OpenSSL using ctypes - """ - # pylint: disable=too-many-statements, too-many-instance-attributes - def __init__(self, library): - """ - Build the wrapper - """ - self._lib = ctypes.CDLL(library) - self._version, self._hexversion, self._cflags = get_version(self._lib) - self._libreSSL = self._version.startswith(b"LibreSSL") - - self.pointer = ctypes.pointer - self.c_int = ctypes.c_int - self.byref = ctypes.byref - self.create_string_buffer = ctypes.create_string_buffer - - self.BN_new = self._lib.BN_new - self.BN_new.restype = ctypes.c_void_p - self.BN_new.argtypes = [] - - self.BN_free = self._lib.BN_free - self.BN_free.restype = None - self.BN_free.argtypes = [ctypes.c_void_p] - - self.BN_clear_free = self._lib.BN_clear_free - self.BN_clear_free.restype = None - self.BN_clear_free.argtypes = [ctypes.c_void_p] - - self.BN_num_bits = self._lib.BN_num_bits - self.BN_num_bits.restype = ctypes.c_int - self.BN_num_bits.argtypes = [ctypes.c_void_p] - - self.BN_bn2bin = self._lib.BN_bn2bin - self.BN_bn2bin.restype = ctypes.c_int - self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - try: - self.BN_bn2binpad = self._lib.BN_bn2binpad - self.BN_bn2binpad.restype = ctypes.c_int - self.BN_bn2binpad.argtypes = [ctypes.c_void_p, ctypes.c_void_p, - ctypes.c_int] - except AttributeError: - # optional, we have a workaround - pass - - self.BN_bin2bn = self._lib.BN_bin2bn - self.BN_bin2bn.restype = ctypes.c_void_p - self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p] - - self.EC_KEY_free = self._lib.EC_KEY_free - self.EC_KEY_free.restype = None - self.EC_KEY_free.argtypes = [ctypes.c_void_p] - - self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name - self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p - self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] - - self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key - self.EC_KEY_generate_key.restype = ctypes.c_int - 
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_check_key = self._lib.EC_KEY_check_key - self.EC_KEY_check_key.restype = ctypes.c_int - self.EC_KEY_check_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key - self.EC_KEY_get0_private_key.restype = ctypes.c_void_p - self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key - self.EC_KEY_get0_public_key.restype = ctypes.c_void_p - self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] - - self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group - self.EC_KEY_get0_group.restype = ctypes.c_void_p - self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] - - self.EC_POINT_get_affine_coordinates_GFp = \ - self._lib.EC_POINT_get_affine_coordinates_GFp - self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int - self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_get_affine_coordinates = \ - self._lib.EC_POINT_get_affine_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_get_affine_coordinates = \ - self._lib.EC_POINT_get_affine_coordinates_GF2m - self.EC_POINT_get_affine_coordinates.restype = ctypes.c_int - self.EC_POINT_get_affine_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key - self.EC_KEY_set_private_key.restype = ctypes.c_int - self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key - self.EC_KEY_set_public_key.restype = ctypes.c_int - self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_group = self._lib.EC_KEY_set_group - 
self.EC_KEY_set_group.restype = ctypes.c_int - self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_set_affine_coordinates_GFp = \ - self._lib.EC_POINT_set_affine_coordinates_GFp - self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int - self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_set_affine_coordinates = \ - self._lib.EC_POINT_set_affine_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_set_affine_coordinates = \ - self._lib.EC_POINT_set_affine_coordinates_GF2m - self.EC_POINT_set_affine_coordinates.restype = ctypes.c_int - self.EC_POINT_set_affine_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.EC_POINT_set_compressed_coordinates = \ - self._lib.EC_POINT_set_compressed_coordinates - except AttributeError: - # OpenSSL docs say only use this for backwards compatibility - self.EC_POINT_set_compressed_coordinates = \ - self._lib.EC_POINT_set_compressed_coordinates_GF2m - self.EC_POINT_set_compressed_coordinates.restype = ctypes.c_int - self.EC_POINT_set_compressed_coordinates.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_int, - ctypes.c_void_p] - - self.EC_POINT_new = self._lib.EC_POINT_new - self.EC_POINT_new.restype = ctypes.c_void_p - self.EC_POINT_new.argtypes = [ctypes.c_void_p] - - self.EC_POINT_free = self._lib.EC_POINT_free - self.EC_POINT_free.restype = None - self.EC_POINT_free.argtypes = [ctypes.c_void_p] - - self.BN_CTX_free = self._lib.BN_CTX_free - self.BN_CTX_free.restype = None - self.BN_CTX_free.argtypes = [ctypes.c_void_p] - - self.EC_POINT_mul = self._lib.EC_POINT_mul - self.EC_POINT_mul.restype = None - self.EC_POINT_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - 
ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key - self.EC_KEY_set_private_key.restype = ctypes.c_int - self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EC_KEY_OpenSSL = self._lib.EC_KEY_OpenSSL - self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p - self._lib.EC_KEY_OpenSSL.argtypes = [] - - self.EC_KEY_set_method = self._lib.EC_KEY_set_method - self._lib.EC_KEY_set_method.restype = ctypes.c_int - self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - else: - self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL - self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p - self._lib.ECDH_OpenSSL.argtypes = [] - - self.ECDH_set_method = self._lib.ECDH_set_method - self._lib.ECDH_set_method.restype = ctypes.c_int - self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.ECDH_compute_key = self._lib.ECDH_compute_key - self.ECDH_compute_key.restype = ctypes.c_int - self.ECDH_compute_key.argtypes = [ctypes.c_void_p, - ctypes.c_int, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex - self.EVP_CipherInit_ex.restype = ctypes.c_int - self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new - self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p - self.EVP_CIPHER_CTX_new.argtypes = [] - - # Cipher - self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128 - self.EVP_aes_128_cfb128.restype = ctypes.c_void_p - self.EVP_aes_128_cfb128.argtypes = [] - - self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128 - self.EVP_aes_256_cfb128.restype = ctypes.c_void_p - self.EVP_aes_256_cfb128.argtypes = [] - - self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc - self.EVP_aes_128_cbc.restype = ctypes.c_void_p - self.EVP_aes_128_cbc.argtypes = [] - - self.EVP_aes_256_cbc = 
self._lib.EVP_aes_256_cbc - self.EVP_aes_256_cbc.restype = ctypes.c_void_p - self.EVP_aes_256_cbc.argtypes = [] - - # self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr - # self.EVP_aes_128_ctr.restype = ctypes.c_void_p - # self.EVP_aes_128_ctr.argtypes = [] - - # self.EVP_aes_256_ctr = self._lib.EVP_aes_256_ctr - # self.EVP_aes_256_ctr.restype = ctypes.c_void_p - # self.EVP_aes_256_ctr.argtypes = [] - - self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb - self.EVP_aes_128_ofb.restype = ctypes.c_void_p - self.EVP_aes_128_ofb.argtypes = [] - - self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb - self.EVP_aes_256_ofb.restype = ctypes.c_void_p - self.EVP_aes_256_ofb.argtypes = [] - - self.EVP_bf_cbc = self._lib.EVP_bf_cbc - self.EVP_bf_cbc.restype = ctypes.c_void_p - self.EVP_bf_cbc.argtypes = [] - - self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64 - self.EVP_bf_cfb64.restype = ctypes.c_void_p - self.EVP_bf_cfb64.argtypes = [] - - self.EVP_rc4 = self._lib.EVP_rc4 - self.EVP_rc4.restype = ctypes.c_void_p - self.EVP_rc4.argtypes = [] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EVP_CIPHER_CTX_reset = self._lib.EVP_CIPHER_CTX_reset - self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int - self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p] - else: - self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup - self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int - self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p] - - self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free - self.EVP_CIPHER_CTX_free.restype = None - self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p] - - self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate - self.EVP_CipherUpdate.restype = ctypes.c_int - self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_int] - - self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex - self.EVP_CipherFinal_ex.restype = ctypes.c_int - self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p, - 
ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestInit = self._lib.EVP_DigestInit - self.EVP_DigestInit.restype = ctypes.c_int - self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex - self.EVP_DigestInit_ex.restype = ctypes.c_int - self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p] - - self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate - self.EVP_DigestUpdate.restype = ctypes.c_int - self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_int] - - self.EVP_DigestFinal = self._lib.EVP_DigestFinal - self.EVP_DigestFinal.restype = ctypes.c_int - self.EVP_DigestFinal.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex - self.EVP_DigestFinal_ex.restype = ctypes.c_int - self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.ECDSA_sign = self._lib.ECDSA_sign - self.ECDSA_sign.restype = ctypes.c_int - self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_void_p, ctypes.c_void_p] - - self.ECDSA_verify = self._lib.ECDSA_verify - self.ECDSA_verify.restype = ctypes.c_int - self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p] - - if self._hexversion >= 0x10100000 and not self._libreSSL: - self.EVP_MD_CTX_new = self._lib.EVP_MD_CTX_new - self.EVP_MD_CTX_new.restype = ctypes.c_void_p - self.EVP_MD_CTX_new.argtypes = [] - - self.EVP_MD_CTX_reset = self._lib.EVP_MD_CTX_reset - self.EVP_MD_CTX_reset.restype = None - self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p] - - self.EVP_MD_CTX_free = self._lib.EVP_MD_CTX_free - self.EVP_MD_CTX_free.restype = None - self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p] - - self.EVP_sha1 = self._lib.EVP_sha1 - self.EVP_sha1.restype = ctypes.c_void_p - self.EVP_sha1.argtypes = [] - - 
self.digest_ecdsa_sha1 = self.EVP_sha1 - else: - self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create - self.EVP_MD_CTX_create.restype = ctypes.c_void_p - self.EVP_MD_CTX_create.argtypes = [] - - self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init - self.EVP_MD_CTX_init.restype = None - self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p] - - self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy - self.EVP_MD_CTX_destroy.restype = None - self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p] - - self.EVP_ecdsa = self._lib.EVP_ecdsa - self._lib.EVP_ecdsa.restype = ctypes.c_void_p - self._lib.EVP_ecdsa.argtypes = [] - - self.digest_ecdsa_sha1 = self.EVP_ecdsa - - self.RAND_bytes = self._lib.RAND_bytes - self.RAND_bytes.restype = ctypes.c_int - self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int] - - self.EVP_sha256 = self._lib.EVP_sha256 - self.EVP_sha256.restype = ctypes.c_void_p - self.EVP_sha256.argtypes = [] - - self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey - self.i2o_ECPublicKey.restype = ctypes.c_void_p - self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] - - self.EVP_sha512 = self._lib.EVP_sha512 - self.EVP_sha512.restype = ctypes.c_void_p - self.EVP_sha512.argtypes = [] - - self.HMAC = self._lib.HMAC - self.HMAC.restype = ctypes.c_void_p - self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_void_p] - - try: - self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC - except Exception: - # The above is not compatible with all versions of OSX. 
- self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1 - - self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int - self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, ctypes.c_void_p, - ctypes.c_int, ctypes.c_void_p] - - # Blind signature requirements - self.BN_CTX_new = self._lib.BN_CTX_new - self.BN_CTX_new.restype = ctypes.c_void_p - self.BN_CTX_new.argtypes = [] - - self.BN_dup = self._lib.BN_dup - self.BN_dup.restype = ctypes.c_void_p - self.BN_dup.argtypes = [ctypes.c_void_p] - - self.BN_rand = self._lib.BN_rand - self.BN_rand.restype = ctypes.c_int - self.BN_rand.argtypes = [ctypes.c_void_p, - ctypes.c_int, - ctypes.c_int] - - self.BN_set_word = self._lib.BN_set_word - self.BN_set_word.restype = ctypes.c_int - self.BN_set_word.argtypes = [ctypes.c_void_p, - ctypes.c_ulong] - - self.BN_mul = self._lib.BN_mul - self.BN_mul.restype = ctypes.c_int - self.BN_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_add = self._lib.BN_mod_add - self.BN_mod_add.restype = ctypes.c_int - self.BN_mod_add.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_inverse = self._lib.BN_mod_inverse - self.BN_mod_inverse.restype = ctypes.c_void_p - self.BN_mod_inverse.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_mod_mul = self._lib.BN_mod_mul - self.BN_mod_mul.restype = ctypes.c_int - self.BN_mod_mul.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.BN_lshift = self._lib.BN_lshift - self.BN_lshift.restype = ctypes.c_int - self.BN_lshift.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_int] - - self.BN_sub_word = self._lib.BN_sub_word - self.BN_sub_word.restype = ctypes.c_int - self.BN_sub_word.argtypes = [ctypes.c_void_p, - ctypes.c_ulong] - - self.BN_cmp = self._lib.BN_cmp - 
self.BN_cmp.restype = ctypes.c_int - self.BN_cmp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - try: - self.BN_is_odd = self._lib.BN_is_odd - self.BN_is_odd.restype = ctypes.c_int - self.BN_is_odd.argtypes = [ctypes.c_void_p] - except AttributeError: - # OpenSSL 1.1.0 implements this as a function, but earlier - # versions as macro, so we need to workaround - self.BN_is_odd = self.BN_is_odd_compatible - - self.BN_bn2dec = self._lib.BN_bn2dec - self.BN_bn2dec.restype = ctypes.c_char_p - self.BN_bn2dec.argtypes = [ctypes.c_void_p] - - self.EC_GROUP_new_by_curve_name = self._lib.EC_GROUP_new_by_curve_name - self.EC_GROUP_new_by_curve_name.restype = ctypes.c_void_p - self.EC_GROUP_new_by_curve_name.argtypes = [ctypes.c_int] - - self.EC_GROUP_get_order = self._lib.EC_GROUP_get_order - self.EC_GROUP_get_order.restype = ctypes.c_int - self.EC_GROUP_get_order.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_GROUP_get_cofactor = self._lib.EC_GROUP_get_cofactor - self.EC_GROUP_get_cofactor.restype = ctypes.c_int - self.EC_GROUP_get_cofactor.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_GROUP_get0_generator = self._lib.EC_GROUP_get0_generator - self.EC_GROUP_get0_generator.restype = ctypes.c_void_p - self.EC_GROUP_get0_generator.argtypes = [ctypes.c_void_p] - - self.EC_POINT_copy = self._lib.EC_POINT_copy - self.EC_POINT_copy.restype = ctypes.c_int - self.EC_POINT_copy.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_add = self._lib.EC_POINT_add - self.EC_POINT_add.restype = ctypes.c_int - self.EC_POINT_add.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_cmp = self._lib.EC_POINT_cmp - self.EC_POINT_cmp.restype = ctypes.c_int - self.EC_POINT_cmp.argtypes = [ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p, - ctypes.c_void_p] - - self.EC_POINT_set_to_infinity = self._lib.EC_POINT_set_to_infinity - 
self.EC_POINT_set_to_infinity.restype = ctypes.c_int - self.EC_POINT_set_to_infinity.argtypes = [ctypes.c_void_p, - ctypes.c_void_p] - - self._set_ciphers() - self._set_curves() - - def _set_ciphers(self): - self.cipher_algo = { - 'aes-128-cbc': CipherName( - 'aes-128-cbc', self.EVP_aes_128_cbc, 16), - 'aes-256-cbc': CipherName( - 'aes-256-cbc', self.EVP_aes_256_cbc, 16), - 'aes-128-cfb': CipherName( - 'aes-128-cfb', self.EVP_aes_128_cfb128, 16), - 'aes-256-cfb': CipherName( - 'aes-256-cfb', self.EVP_aes_256_cfb128, 16), - 'aes-128-ofb': CipherName( - 'aes-128-ofb', self._lib.EVP_aes_128_ofb, 16), - 'aes-256-ofb': CipherName( - 'aes-256-ofb', self._lib.EVP_aes_256_ofb, 16), - # 'aes-128-ctr': CipherName( - # 'aes-128-ctr', self._lib.EVP_aes_128_ctr, 16), - # 'aes-256-ctr': CipherName( - # 'aes-256-ctr', self._lib.EVP_aes_256_ctr, 16), - 'bf-cfb': CipherName( - 'bf-cfb', self.EVP_bf_cfb64, 8), - 'bf-cbc': CipherName( - 'bf-cbc', self.EVP_bf_cbc, 8), - # 128 is the initialisation size not block size - 'rc4': CipherName( - 'rc4', self.EVP_rc4, 128), - } - - def _set_curves(self): - self.curves = { - 'secp112r1': 704, - 'secp112r2': 705, - 'secp128r1': 706, - 'secp128r2': 707, - 'secp160k1': 708, - 'secp160r1': 709, - 'secp160r2': 710, - 'secp192k1': 711, - 'secp224k1': 712, - 'secp224r1': 713, - 'secp256k1': 714, - 'secp384r1': 715, - 'secp521r1': 716, - 'sect113r1': 717, - 'sect113r2': 718, - 'sect131r1': 719, - 'sect131r2': 720, - 'sect163k1': 721, - 'sect163r1': 722, - 'sect163r2': 723, - 'sect193r1': 724, - 'sect193r2': 725, - 'sect233k1': 726, - 'sect233r1': 727, - 'sect239k1': 728, - 'sect283k1': 729, - 'sect283r1': 730, - 'sect409k1': 731, - 'sect409r1': 732, - 'sect571k1': 733, - 'sect571r1': 734, - } - - def BN_num_bytes(self, x): - """ - returns the length of a BN (OpenSSl API) - """ - return int((self.BN_num_bits(x) + 7) / 8) - - def BN_is_odd_compatible(self, x): - """ - returns if BN is odd - we assume big endianness, and that BN is initialised - """ - 
length = self.BN_num_bytes(x) - data = self.malloc(0, length) - OpenSSL.BN_bn2bin(x, data) - return ord(data[length - 1]) & 1 - - def get_cipher(self, name): - """ - returns the OpenSSL cipher instance - """ - if name not in self.cipher_algo: - raise Exception("Unknown cipher") - return self.cipher_algo[name] - - def get_curve(self, name): - """ - returns the id of a elliptic curve - """ - if name not in self.curves: - raise Exception("Unknown curve") - return self.curves[name] - - def get_curve_by_id(self, id_): - """ - returns the name of a elliptic curve with his id - """ - res = None - for i in self.curves: - if self.curves[i] == id_: - res = i - break - if res is None: - raise Exception("Unknown curve") - return res - - def rand(self, size): - """ - OpenSSL random function - """ - buffer_ = self.malloc(0, size) - # This pyelliptic library, by default, didn't check the return value - # of RAND_bytes. It is evidently possible that it returned an error - # and not-actually-random data. However, in tests on various - # operating systems, while generating hundreds of gigabytes of random - # strings of various sizes I could not get an error to occur. - # Also Bitcoin doesn't check the return value of RAND_bytes either. 
- # Fixed in Bitmessage version 0.4.2 (in source code on 2013-10-13) - while self.RAND_bytes(buffer_, size) != 1: - import time - time.sleep(1) - return buffer_.raw - - def malloc(self, data, size): - """ - returns a create_string_buffer (ctypes) - """ - buffer_ = None - if data != 0: - if sys.version_info.major == 3 and isinstance(data, type('')): - data = data.encode() - buffer_ = self.create_string_buffer(data, size) - else: - buffer_ = self.create_string_buffer(size) - return buffer_ - - -def loadOpenSSL(): - """This function finds and load the OpenSSL library""" - # pylint: disable=global-statement - global OpenSSL - from os import path, environ - from ctypes.util import find_library - - libdir = [] - if getattr(sys, 'frozen', None): - if 'darwin' in sys.platform: - libdir.extend([ - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.1.0.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.2.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.1.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.1.0.0.dylib'), - path.join( - environ['RESOURCEPATH'], '..', - 'Frameworks', 'libcrypto.0.9.8.dylib'), - ]) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append(path.join(sys._MEIPASS, 'libeay32.dll')) - else: - libdir.extend([ - path.join(sys._MEIPASS, 'libcrypto.so'), - path.join(sys._MEIPASS, 'libssl.so'), - path.join(sys._MEIPASS, 'libcrypto.so.1.1.0'), - path.join(sys._MEIPASS, 'libssl.so.1.1.0'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.2'), - path.join(sys._MEIPASS, 'libssl.so.1.0.2'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.1'), - path.join(sys._MEIPASS, 'libssl.so.1.0.1'), - path.join(sys._MEIPASS, 'libcrypto.so.1.0.0'), - path.join(sys._MEIPASS, 'libssl.so.1.0.0'), - path.join(sys._MEIPASS, 'libcrypto.so.0.9.8'), - 
path.join(sys._MEIPASS, 'libssl.so.0.9.8'), - ]) - if 'darwin' in sys.platform: - libdir.extend([ - 'libcrypto.dylib', '/usr/local/opt/openssl/lib/libcrypto.dylib']) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append('libeay32.dll') - else: - libdir.append('libcrypto.so') - libdir.append('libssl.so') - libdir.append('libcrypto.so.1.0.0') - libdir.append('libssl.so.1.0.0') - if 'linux' in sys.platform or 'darwin' in sys.platform \ - or 'bsd' in sys.platform: - libdir.append(find_library('ssl')) - elif 'win32' in sys.platform or 'win64' in sys.platform: - libdir.append(find_library('libeay32')) - for library in libdir: - try: - OpenSSL = _OpenSSL(library) - return - except Exception: - pass - raise Exception( - "Couldn't find and load the OpenSSL library. You must install it.") - - -loadOpenSSL() diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py b/src/tests/mock/pybitmessage/pyelliptic/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py deleted file mode 100644 index 7b5c59b1..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/tests/test_arithmetic.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Test the arithmetic functions -""" - -from binascii import unhexlify -import unittest - -try: - from pyelliptic import arithmetic -except ImportError: - from pybitmessage.pyelliptic import arithmetic - - -# These keys are from addresses test script -sample_pubsigningkey = ( - b'044a367f049ec16cb6b6118eb734a9962d10b8db59c890cd08f210c43ff08bdf09d' - b'16f502ca26cd0713f38988a1237f1fc8fa07b15653c996dc4013af6d15505ce') -sample_pubencryptionkey = ( - b'044597d59177fc1d89555d38915f581b5ff2286b39d022ca0283d2bdd5c36be5d3c' - b'e7b9b97792327851a562752e4b79475d1f51f5a71352482b241227f45ed36a9') -sample_privsigningkey = \ - b'93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665' 
-sample_privencryptionkey = \ - b'4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a' - -sample_factor = \ - 66858749573256452658262553961707680376751171096153613379801854825275240965733 -# G * sample_factor -sample_point = ( - 33567437183004486938355437500683826356288335339807546987348409590129959362313, - 94730058721143827257669456336351159718085716196507891067256111928318063085006 -) - - -class TestArithmetic(unittest.TestCase): - """Test arithmetic functions""" - def test_base10_multiply(self): - """Test arithmetic.base10_multiply""" - self.assertEqual( - sample_point, - arithmetic.base10_multiply(arithmetic.G, sample_factor)) - - def test_decode(self): - """Decode sample privsigningkey from hex to int and compare to factor""" - self.assertEqual( - arithmetic.decode(sample_privsigningkey, 16), sample_factor) - - def test_encode(self): - """Encode sample factor into hex and compare to privsigningkey""" - self.assertEqual( - arithmetic.encode(sample_factor, 16), sample_privsigningkey) - - def test_changebase(self): - """Check the results of changebase()""" - self.assertEqual( - arithmetic.changebase(sample_privsigningkey, 16, 256, minlen=32), - unhexlify(sample_privsigningkey)) - self.assertEqual( - arithmetic.changebase(sample_pubsigningkey, 16, 256, minlen=64), - unhexlify(sample_pubsigningkey)) - self.assertEqual( - 32, # padding - len(arithmetic.changebase(sample_privsigningkey[:5], 16, 256, 32))) - - def test_hex_to_point(self): - """Check that sample_pubsigningkey is sample_point encoded in hex""" - self.assertEqual( - arithmetic.hex_to_point(sample_pubsigningkey), sample_point) - - def test_point_to_hex(self): - """Check that sample_point is sample_pubsigningkey decoded from hex""" - self.assertEqual( - arithmetic.point_to_hex(sample_point), sample_pubsigningkey) - - def test_privtopub(self): - """Generate public keys and check the result""" - self.assertEqual( - arithmetic.privtopub(sample_privsigningkey), - sample_pubsigningkey - ) - 
self.assertEqual( - arithmetic.privtopub(sample_privencryptionkey), - sample_pubencryptionkey - ) diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py deleted file mode 100644 index 9ed72081..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/tests/test_blindsig.py +++ /dev/null @@ -1,277 +0,0 @@ -""" -Test for ECC blind signatures -""" -import os -import unittest -from hashlib import sha256 - -try: - from pyelliptic import ECCBlind, ECCBlindChain, OpenSSL -except ImportError: - from pybitmessage.pyelliptic import ECCBlind, ECCBlindChain, OpenSSL - -# pylint: disable=protected-access - - -class TestBlindSig(unittest.TestCase): - """ - Test case for ECC blind signature - """ - def test_blind_sig(self): - """Test full sequence using a random certifier key and a random message""" - # See page 127 of the paper - # (1) Initialization - signer_obj = ECCBlind() - point_r = signer_obj.signer_init() - self.assertEqual(len(signer_obj.pubkey()), 35) - - # (2) Request - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - # only 64 byte messages are planned to be used in Bitmessage - msg = os.urandom(64) - msg_blinded = requester_obj.create_signing_request(point_r, msg) - self.assertEqual(len(msg_blinded), 32) - - # check - self.assertNotEqual(sha256(msg).digest(), msg_blinded) - - # (3) Signature Generation - signature_blinded = signer_obj.blind_sign(msg_blinded) - assert isinstance(signature_blinded, bytes) - self.assertEqual(len(signature_blinded), 32) - - # (4) Extraction - signature = requester_obj.unblind(signature_blinded) - assert isinstance(signature, bytes) - self.assertEqual(len(signature), 65) - - self.assertNotEqual(signature, signature_blinded) - - # (5) Verification - verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) - self.assertTrue(verifier_obj.verify(msg, signature)) - - def test_is_odd(self): - """Test our implementation of BN_is_odd""" - for _ in range(1024): - obj 
= ECCBlind() - x = OpenSSL.BN_new() - y = OpenSSL.BN_new() - OpenSSL.EC_POINT_get_affine_coordinates( - obj.group, obj.Q, x, y, 0) - self.assertEqual(OpenSSL.BN_is_odd(y), - OpenSSL.BN_is_odd_compatible(y)) - - def test_serialize_ec_point(self): - """Test EC point serialization/deserialization""" - for _ in range(1024): - try: - obj = ECCBlind() - obj2 = ECCBlind() - randompoint = obj.Q - serialized = obj._ec_point_serialize(randompoint) - secondpoint = obj2._ec_point_deserialize(serialized) - x0 = OpenSSL.BN_new() - y0 = OpenSSL.BN_new() - OpenSSL.EC_POINT_get_affine_coordinates(obj.group, - randompoint, x0, - y0, obj.ctx) - x1 = OpenSSL.BN_new() - y1 = OpenSSL.BN_new() - OpenSSL.EC_POINT_get_affine_coordinates(obj2.group, - secondpoint, x1, - y1, obj2.ctx) - - self.assertEqual(OpenSSL.BN_cmp(y0, y1), 0) - self.assertEqual(OpenSSL.BN_cmp(x0, x1), 0) - self.assertEqual(OpenSSL.EC_POINT_cmp(obj.group, randompoint, - secondpoint, 0), 0) - finally: - OpenSSL.BN_free(x0) - OpenSSL.BN_free(x1) - OpenSSL.BN_free(y0) - OpenSSL.BN_free(y1) - del obj - del obj2 - - def test_serialize_bn(self): - """Test Bignum serialization/deserialization""" - for _ in range(1024): - obj = ECCBlind() - obj2 = ECCBlind() - randomnum = obj.d - serialized = obj._bn_serialize(randomnum) - secondnum = obj2._bn_deserialize(serialized) - self.assertEqual(OpenSSL.BN_cmp(randomnum, secondnum), 0) - - def test_blind_sig_many(self): - """Test a lot of blind signatures""" - for _ in range(1024): - self.test_blind_sig() - - def test_blind_sig_value(self): - """Test blind signature value checking""" - signer_obj = ECCBlind(value=5) - point_r = signer_obj.signer_init() - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - msg = os.urandom(64) - msg_blinded = requester_obj.create_signing_request(point_r, msg) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) - 
self.assertFalse(verifier_obj.verify(msg, signature, value=8)) - - def test_blind_sig_expiration(self): - """Test blind signature expiration checking""" - signer_obj = ECCBlind(year=2020, month=1) - point_r = signer_obj.signer_init() - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - msg = os.urandom(64) - msg_blinded = requester_obj.create_signing_request(point_r, msg) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - verifier_obj = ECCBlind(pubkey=signer_obj.pubkey()) - self.assertFalse(verifier_obj.verify(msg, signature)) - - def test_blind_sig_chain(self): # pylint: disable=too-many-locals - """Test blind signature chain using a random certifier key and a random message""" - - test_levels = 4 - msg = os.urandom(1024) - - ca = ECCBlind() - signer_obj = ca - - output = bytearray() - - for level in range(test_levels): - if not level: - output.extend(ca.pubkey()) - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - child_obj = ECCBlind() - point_r = signer_obj.signer_init() - pubkey = child_obj.pubkey() - - if level == test_levels - 1: - msg_blinded = requester_obj.create_signing_request(point_r, - msg) - else: - msg_blinded = requester_obj.create_signing_request(point_r, - pubkey) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - if level != test_levels - 1: - output.extend(pubkey) - output.extend(signature) - signer_obj = child_obj - verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) - self.assertTrue(verifychain.verify(msg=msg, value=1)) - - def test_blind_sig_chain_wrong_ca(self): # pylint: disable=too-many-locals - """Test blind signature chain with an unlisted ca""" - - test_levels = 4 - msg = os.urandom(1024) - - ca = ECCBlind() - fake_ca = ECCBlind() - signer_obj = fake_ca - - output = bytearray() - - for level in range(test_levels): - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - child_obj = 
ECCBlind() - if not level: - # unlisted CA, but a syntactically valid pubkey - output.extend(fake_ca.pubkey()) - point_r = signer_obj.signer_init() - pubkey = child_obj.pubkey() - - if level == test_levels - 1: - msg_blinded = requester_obj.create_signing_request(point_r, - msg) - else: - msg_blinded = requester_obj.create_signing_request(point_r, - pubkey) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - if level != test_levels - 1: - output.extend(pubkey) - output.extend(signature) - signer_obj = child_obj - verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) - self.assertFalse(verifychain.verify(msg, 1)) - - def test_blind_sig_chain_wrong_msg(self): # pylint: disable=too-many-locals - """Test blind signature chain with a fake message""" - - test_levels = 4 - msg = os.urandom(1024) - fake_msg = os.urandom(1024) - - ca = ECCBlind() - signer_obj = ca - - output = bytearray() - - for level in range(test_levels): - if not level: - output.extend(ca.pubkey()) - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - child_obj = ECCBlind() - point_r = signer_obj.signer_init() - pubkey = child_obj.pubkey() - - if level == test_levels - 1: - msg_blinded = requester_obj.create_signing_request(point_r, - msg) - else: - msg_blinded = requester_obj.create_signing_request(point_r, - pubkey) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - if level != test_levels - 1: - output.extend(pubkey) - output.extend(signature) - signer_obj = child_obj - verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) - self.assertFalse(verifychain.verify(fake_msg, 1)) - - def test_blind_sig_chain_wrong_intermediary(self): # pylint: disable=too-many-locals - """Test blind signature chain using a fake intermediary pubkey""" - - test_levels = 4 - msg = os.urandom(1024) - wrong_level = 2 - - ca = ECCBlind() - signer_obj = ca - fake_intermediary = 
ECCBlind() - - output = bytearray() - - for level in range(test_levels): - if not level: - output.extend(ca.pubkey()) - requester_obj = ECCBlind(pubkey=signer_obj.pubkey()) - child_obj = ECCBlind() - point_r = signer_obj.signer_init() - pubkey = child_obj.pubkey() - - if level == test_levels - 1: - msg_blinded = requester_obj.create_signing_request(point_r, - msg) - else: - msg_blinded = requester_obj.create_signing_request(point_r, - pubkey) - signature_blinded = signer_obj.blind_sign(msg_blinded) - signature = requester_obj.unblind(signature_blinded) - if level == wrong_level: - output.extend(fake_intermediary.pubkey()) - elif level != test_levels - 1: - output.extend(pubkey) - output.extend(signature) - signer_obj = child_obj - verifychain = ECCBlindChain(ca=ca.pubkey(), chain=bytes(output)) - self.assertFalse(verifychain.verify(msg, 1)) diff --git a/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py b/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py deleted file mode 100644 index cb789277..00000000 --- a/src/tests/mock/pybitmessage/pyelliptic/tests/test_openssl.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Test if OpenSSL is working correctly -""" -import unittest - -try: - from pyelliptic.openssl import OpenSSL -except ImportError: - from pybitmessage.pyelliptic import OpenSSL - -try: - OpenSSL.BN_bn2binpad - have_pad = True -except AttributeError: - have_pad = None - - -class TestOpenSSL(unittest.TestCase): - """ - Test cases for OpenSSL - """ - def test_is_odd(self): - """Test BN_is_odd implementation""" - ctx = OpenSSL.BN_CTX_new() - a = OpenSSL.BN_new() - group = OpenSSL.EC_GROUP_new_by_curve_name( - OpenSSL.get_curve("secp256k1")) - OpenSSL.EC_GROUP_get_order(group, a, ctx) - - bad = 0 - for _ in range(1024): - OpenSSL.BN_rand(a, OpenSSL.BN_num_bits(a), 0, 0) - if not OpenSSL.BN_is_odd(a) == OpenSSL.BN_is_odd_compatible(a): - bad += 1 - self.assertEqual(bad, 0) - - @unittest.skipUnless(have_pad, 'Skipping OpenSSL pad test') - def 
test_padding(self): - """Test an alternative implementation of bn2binpad""" - - ctx = OpenSSL.BN_CTX_new() - a = OpenSSL.BN_new() - n = OpenSSL.BN_new() - group = OpenSSL.EC_GROUP_new_by_curve_name( - OpenSSL.get_curve("secp256k1")) - OpenSSL.EC_GROUP_get_order(group, n, ctx) - - bad = 0 - for _ in range(1024): - OpenSSL.BN_rand(a, OpenSSL.BN_num_bits(n), 0, 0) - b = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(n)) - c = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(a)) - OpenSSL.BN_bn2binpad(a, b, OpenSSL.BN_num_bytes(n)) - OpenSSL.BN_bn2bin(a, c) - if b.raw != c.raw.rjust(OpenSSL.BN_num_bytes(n), b'\x00'): - bad += 1 - self.assertEqual(bad, 0) diff --git a/src/tests/mock/pybitmessage/qidenticon.py b/src/tests/mock/pybitmessage/qidenticon.py deleted file mode 100644 index 30b61b9b..00000000 --- a/src/tests/mock/pybitmessage/qidenticon.py +++ /dev/null @@ -1,276 +0,0 @@ -### -# qidenticon.py is Licesensed under FreeBSD License. -# (http://www.freebsd.org/copyright/freebsd-license.html) -# -# Copyright 1994-2009 Shin Adachi. All rights reserved. -# Copyright 2013 "Sendiulo". All rights reserved. -# Copyright 2018-2021 The Bitmessage Developers. All rights reserved. -# -# Redistribution and use in source and binary forms, -# with or without modification, are permitted provided that the following -# conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS -# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -### - -# pylint: disable=too-many-locals,too-many-arguments,too-many-function-args -""" -Usage ------ - ->>> import qidenticon ->>> qidenticon.render_identicon(code, size) - -Returns an instance of :class:`QPixmap` which have generated identicon image. -``size`` specifies `patch size`. Generated image size is 3 * ``size``. -""" - -from six.moves import range - -try: - from PyQt5 import QtCore, QtGui -except ImportError: - from PyQt4 import QtCore, QtGui - - -class IdenticonRendererBase(object): - """Encapsulate methods around rendering identicons""" - - PATH_SET = [] - - def __init__(self, code): - """ - :param code: code for icon - """ - if not isinstance(code, int): - code = int(code) - self.code = code - - def render(self, size, twoColor, opacity, penwidth): - """ - render identicon to QPixmap - - :param size: identicon patchsize. 
(image size is 3 * [size]) - :returns: :class:`QPixmap` - """ - - # decode the code - middle, corner, side, foreColor, secondColor, swap_cross = \ - self.decode(self.code, twoColor) - - # make image - image = QtGui.QPixmap( - QtCore.QSize(size * 3 + penwidth, size * 3 + penwidth)) - - # fill background - backColor = QtGui.QColor(255, 255, 255, opacity) - image.fill(backColor) - - kwds = { - 'image': image, - 'size': size, - 'foreColor': foreColor if swap_cross else secondColor, - 'penwidth': penwidth, - 'backColor': backColor} - - # middle patch - image = self.drawPatchQt( - (1, 1), middle[2], middle[1], middle[0], **kwds) - - # side patch - kwds['foreColor'] = foreColor - kwds['patch_type'] = side[0] - for i in range(4): - pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i] - image = self.drawPatchQt(pos, side[2] + 1 + i, side[1], **kwds) - - # corner patch - kwds['foreColor'] = secondColor - kwds['patch_type'] = corner[0] - for i in range(4): - pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i] - image = self.drawPatchQt(pos, corner[2] + 1 + i, corner[1], **kwds) - - return image - - def drawPatchQt( - self, pos, turn, invert, patch_type, image, size, foreColor, - backColor, penwidth): # pylint: disable=unused-argument - """ - :param size: patch size - """ - path = self.PATH_SET[patch_type] - if not path: - # blank patch - invert = not invert - path = [(0., 0.), (1., 0.), (1., 1.), (0., 1.), (0., 0.)] - - polygon = QtGui.QPolygonF([ - QtCore.QPointF(x * size, y * size) for x, y in path]) - - rot = turn % 4 - rect = [ - QtCore.QPointF(0., 0.), QtCore.QPointF(size, 0.), - QtCore.QPointF(size, size), QtCore.QPointF(0., size)] - rotation = [0, 90, 180, 270] - - nopen = QtGui.QPen(foreColor, QtCore.Qt.NoPen) - foreBrush = QtGui.QBrush(foreColor, QtCore.Qt.SolidPattern) - if penwidth > 0: - pen_color = QtGui.QColor(255, 255, 255) - pen = QtGui.QPen(pen_color, QtCore.Qt.SolidPattern) - pen.setWidth(penwidth) - - painter = QtGui.QPainter() - painter.begin(image) - painter.setPen(nopen) - - 
painter.translate( - pos[0] * size + penwidth / 2, pos[1] * size + penwidth / 2) - painter.translate(rect[rot]) - painter.rotate(rotation[rot]) - - if invert: - # subtract the actual polygon from a rectangle to invert it - poly_rect = QtGui.QPolygonF(rect) - polygon = poly_rect.subtracted(polygon) - painter.setBrush(foreBrush) - if penwidth > 0: - # draw the borders - painter.setPen(pen) - painter.drawPolygon(polygon, QtCore.Qt.WindingFill) - # draw the fill - painter.setPen(nopen) - painter.drawPolygon(polygon, QtCore.Qt.WindingFill) - - painter.end() - - return image - - def decode(self, code, twoColor): - """virtual functions""" - raise NotImplementedError - - -class DonRenderer(IdenticonRendererBase): - """ - Don Park's implementation of identicon, see: - https://blog.docuverse.com/2007/01/18/identicon-updated-and-source-released - """ - - PATH_SET = [ - # [0] full square: - [(0, 0), (4, 0), (4, 4), (0, 4)], - # [1] right-angled triangle pointing top-left: - [(0, 0), (4, 0), (0, 4)], - # [2] upwardy triangle: - [(2, 0), (4, 4), (0, 4)], - # [3] left half of square, standing rectangle: - [(0, 0), (2, 0), (2, 4), (0, 4)], - # [4] square standing on diagonale: - [(2, 0), (4, 2), (2, 4), (0, 2)], - # [5] kite pointing topleft: - [(0, 0), (4, 2), (4, 4), (2, 4)], - # [6] Sierpinski triangle, fractal triangles: - [(2, 0), (4, 4), (2, 4), (3, 2), (1, 2), (2, 4), (0, 4)], - # [7] sharp angled lefttop pointing triangle: - [(0, 0), (4, 2), (2, 4)], - # [8] small centered square: - [(1, 1), (3, 1), (3, 3), (1, 3)], - # [9] two small triangles: - [(2, 0), (4, 0), (0, 4), (0, 2), (2, 2)], - # [10] small topleft square: - [(0, 0), (2, 0), (2, 2), (0, 2)], - # [11] downpointing right-angled triangle on bottom: - [(0, 2), (4, 2), (2, 4)], - # [12] uppointing right-angled triangle on bottom: - [(2, 2), (4, 4), (0, 4)], - # [13] small rightbottom pointing right-angled triangle on topleft: - [(2, 0), (2, 2), (0, 2)], - # [14] small lefttop pointing right-angled triangle on 
topleft: - [(0, 0), (2, 0), (0, 2)], - # [15] empty: - []] - # get the [0] full square, [4] square standing on diagonale, - # [8] small centered square, or [15] empty tile: - MIDDLE_PATCH_SET = [0, 4, 8, 15] - - # modify path set - for idx, path in enumerate(PATH_SET): - if path: - p = [(vec[0] / 4.0, vec[1] / 4.0) for vec in path] - PATH_SET[idx] = p + p[:1] - - def decode(self, code, twoColor): - """decode the code""" - - shift = 0 - middleType = (code >> shift) & 0x03 - shift += 2 - middleInvert = (code >> shift) & 0x01 - shift += 1 - cornerType = (code >> shift) & 0x0F - shift += 4 - cornerInvert = (code >> shift) & 0x01 - shift += 1 - cornerTurn = (code >> shift) & 0x03 - shift += 2 - sideType = (code >> shift) & 0x0F - shift += 4 - sideInvert = (code >> shift) & 0x01 - shift += 1 - sideTurn = (code >> shift) & 0x03 - shift += 2 - blue = (code >> shift) & 0x1F - shift += 5 - green = (code >> shift) & 0x1F - shift += 5 - red = (code >> shift) & 0x1F - shift += 5 - second_blue = (code >> shift) & 0x1F - shift += 5 - second_green = (code >> shift) & 0x1F - shift += 5 - second_red = (code >> shift) & 0x1F - shift += 1 - swap_cross = (code >> shift) & 0x01 - - middleType = self.MIDDLE_PATCH_SET[middleType] - - foreColor = (red << 3, green << 3, blue << 3) - foreColor = QtGui.QColor(*foreColor) - - if twoColor: - secondColor = ( - second_blue << 3, second_green << 3, second_red << 3) - secondColor = QtGui.QColor(*secondColor) - else: - secondColor = foreColor - - return (middleType, middleInvert, 0),\ - (cornerType, cornerInvert, cornerTurn),\ - (sideType, sideInvert, sideTurn),\ - foreColor, secondColor, swap_cross - - -def render_identicon( - code, size, twoColor=False, opacity=255, penwidth=0, renderer=None): - """Render an image""" - if not renderer: - renderer = DonRenderer - return renderer(code).render(size, twoColor, opacity, penwidth) diff --git a/src/tests/mock/pybitmessage/queues.py b/src/tests/mock/pybitmessage/queues.py index 4a9b98d2..8f5aea07 100644 
--- a/src/tests/mock/pybitmessage/queues.py +++ b/src/tests/mock/pybitmessage/queues.py @@ -5,10 +5,10 @@ import time from six.moves import queue -try: - from multiqueue import MultiQueue -except ImportError: - from .multiqueue import MultiQueue +# try: +# from multiqueue import MultiQueue +# except ImportError: +# from .multiqueue import MultiQueue class ObjectProcessorQueue(queue.Queue): @@ -44,8 +44,8 @@ addressGeneratorQueue = queue.Queue() #: `.network.ReceiveQueueThread` instances dump objects they hear #: on the network into this queue to be processed. objectProcessorQueue = ObjectProcessorQueue() -invQueue = MultiQueue() -addrQueue = MultiQueue() +# invQueue = MultiQueue() +# addrQueue = MultiQueue() portCheckerQueue = queue.Queue() receiveDataQueue = queue.Queue() #: The address generator thread uses this queue to get information back diff --git a/src/tests/mock/pybitmessage/randomtrackingdict.py b/src/tests/mock/pybitmessage/randomtrackingdict.py deleted file mode 100644 index 5bf19181..00000000 --- a/src/tests/mock/pybitmessage/randomtrackingdict.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Track randomize ordered dict -""" -from threading import RLock -from time import time - -try: - import helper_random -except ImportError: - from . import helper_random - - -class RandomTrackingDict(object): - """ - Dict with randomised order and tracking. - - Keeps a track of how many items have been requested from the dict, - and timeouts. Resets after all objects have been retrieved and timed out. - The main purpose of this isn't as much putting related code together - as performance optimisation and anonymisation of downloading of objects - from other peers. If done using a standard dict or array, it takes - too much CPU (and looks convoluted). Randomisation helps with anonymity. 
- """ - # pylint: disable=too-many-instance-attributes - maxPending = 10 - pendingTimeout = 60 - - def __init__(self): - self.dictionary = {} - self.indexDict = [] - self.len = 0 - self.pendingLen = 0 - self.lastPoll = 0 - self.lastObject = 0 - self.lock = RLock() - - def __len__(self): - return self.len - - def __contains__(self, key): - return key in self.dictionary - - def __getitem__(self, key): - return self.dictionary[key][1] - - def _swap(self, i1, i2): - with self.lock: - key1 = self.indexDict[i1] - key2 = self.indexDict[i2] - self.indexDict[i1] = key2 - self.indexDict[i2] = key1 - self.dictionary[key1][0] = i2 - self.dictionary[key2][0] = i1 - # for quick reassignment - return i2 - - def __setitem__(self, key, value): - with self.lock: - if key in self.dictionary: - self.dictionary[key][1] = value - else: - self.indexDict.append(key) - self.dictionary[key] = [self.len, value] - self._swap(self.len, self.len - self.pendingLen) - self.len += 1 - - def __delitem__(self, key): - if key not in self.dictionary: - raise KeyError - with self.lock: - index = self.dictionary[key][0] - # not pending - if index < self.len - self.pendingLen: - # left of pending part - index = self._swap(index, self.len - self.pendingLen - 1) - # pending - else: - self.pendingLen -= 1 - # end - self._swap(index, self.len - 1) - # if the following del is batched, performance of this single - # operation can improve 4x, but it's already very fast so we'll - # ignore it for the time being - del self.indexDict[-1] - del self.dictionary[key] - self.len -= 1 - - def setMaxPending(self, maxPending): - """ - Sets maximum number of objects that can be retrieved from the class - simultaneously as long as there is no timeout - """ - self.maxPending = maxPending - - def setPendingTimeout(self, pendingTimeout): - """Sets how long to wait for a timeout if max pending is reached - (or all objects have been retrieved)""" - self.pendingTimeout = pendingTimeout - - def setLastObject(self): - """Update 
timestamp for tracking of received objects""" - self.lastObject = time() - - def randomKeys(self, count=1): - """Retrieve count random keys from the dict - that haven't already been retrieved""" - if self.len == 0 or ( - (self.pendingLen >= self.maxPending or self.pendingLen == self.len) - and self.lastPoll + self.pendingTimeout > time()): - raise KeyError - - # pylint: disable=redefined-outer-name - with self.lock: - # reset if we've requested all - # and if last object received too long time ago - if self.pendingLen == self.len and self.lastObject + \ - self.pendingTimeout < time(): - self.pendingLen = 0 - self.setLastObject() - available = self.len - self.pendingLen - if count > available: - count = available - randomIndex = helper_random.randomsample( - range(self.len - self.pendingLen), count) - retval = [self.indexDict[i] for i in randomIndex] - - for i in sorted(randomIndex, reverse=True): - # swap with one below lowest pending - self._swap(i, self.len - self.pendingLen - 1) - self.pendingLen += 1 - self.lastPoll = time() - return retval diff --git a/src/tests/mock/pybitmessage/shared.py b/src/tests/mock/pybitmessage/shared.py deleted file mode 100644 index 4a654932..00000000 --- a/src/tests/mock/pybitmessage/shared.py +++ /dev/null @@ -1,255 +0,0 @@ -""" -Some shared functions - -.. deprecated:: 0.6.3 - Should be moved to different places and this file removed, - but it needs refactoring. -""" -from __future__ import division - -# Libraries. -import hashlib -import os -import stat -import subprocess -import sys -from binascii import hexlify - -# Project imports. 
-import highlevelcrypto -import state -from addresses import decodeAddress, encodeVarint -from bmconfigparser import BMConfigParser -from debug import logger -from helper_sql import sqlQuery - -from pyelliptic import arithmetic - - -myECCryptorObjects = {} -MyECSubscriptionCryptorObjects = {} -# The key in this dictionary is the RIPE hash which is encoded -# in an address and value is the address itself. -myAddressesByHash = {} -# The key in this dictionary is the tag generated from the address. -myAddressesByTag = {} -broadcastSendersForWhichImWatching = {} - - -def isAddressInMyAddressBook(address): - """Is address in my addressbook?""" - queryreturn = sqlQuery( - '''select address from addressbook where address=?''', - address) - return queryreturn != [] - - -# At this point we should really just have a isAddressInMy(book, address)... -def isAddressInMySubscriptionsList(address): - """Am I subscribed to this address?""" - queryreturn = sqlQuery( - '''select * from subscriptions where address=?''', - str(address)) - return queryreturn != [] - - -def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address): - """ - Am I subscribed to this address, is it in my addressbook or whitelist? 
- """ - if isAddressInMyAddressBook(address): - return True - - queryreturn = sqlQuery( - '''SELECT address FROM whitelist where address=?''' - ''' and enabled = '1' ''', - address) - if queryreturn != []: - return True - - queryreturn = sqlQuery( - '''select address from subscriptions where address=?''' - ''' and enabled = '1' ''', - address) - if queryreturn != []: - return True - return False - - -def decodeWalletImportFormat(WIFstring): - # pylint: disable=inconsistent-return-statements - """ - Convert private key from base58 that's used in the config file to - 8-bit binary string - """ - fullString = arithmetic.changebase(WIFstring, 58, 256) - privkey = fullString[:-4] - if fullString[-4:] != \ - hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]: - logger.critical( - 'Major problem! When trying to decode one of your' - ' private keys, the checksum failed. Here are the first' - ' 6 characters of the PRIVATE key: %s', - str(WIFstring)[:6] - ) - os._exit(0) # pylint: disable=protected-access - # return "" - elif privkey[0] == '\x80': # checksum passed - return privkey[1:] - - logger.critical( - 'Major problem! When trying to decode one of your private keys,' - ' the checksum passed but the key doesn\'t begin with hex 80.' 
- ' Here is the PRIVATE key: %s', WIFstring - ) - os._exit(0) # pylint: disable=protected-access - - -def reloadMyAddressHashes(): - """Reload keys for user's addresses from the config file""" - logger.debug('reloading keys from keys.dat file') - myECCryptorObjects.clear() - myAddressesByHash.clear() - myAddressesByTag.clear() - # myPrivateKeys.clear() - - keyfileSecure = checkSensitiveFilePermissions(os.path.join( - state.appdata, 'keys.dat')) - hasEnabledKeys = False - for addressInKeysFile in BMConfigParser().addresses(): - isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled') - if isEnabled: - hasEnabledKeys = True - # status - addressVersionNumber, streamNumber, hashobj = decodeAddress(addressInKeysFile)[1:] - if addressVersionNumber in (2, 3, 4): - # Returns a simple 32 bytes of information encoded - # in 64 Hex characters, or null if there was an error. - privEncryptionKey = hexlify(decodeWalletImportFormat( - BMConfigParser().get(addressInKeysFile, 'privencryptionkey'))) - # It is 32 bytes encoded as 64 hex characters - if len(privEncryptionKey) == 64: - myECCryptorObjects[hashobj] = \ - highlevelcrypto.makeCryptor(privEncryptionKey) - myAddressesByHash[hashobj] = addressInKeysFile - tag = hashlib.sha512(hashlib.sha512( - encodeVarint(addressVersionNumber) - + encodeVarint(streamNumber) + hashobj).digest()).digest()[32:] - myAddressesByTag[tag] = addressInKeysFile - else: - logger.error( - 'Error in reloadMyAddressHashes: Can\'t handle' - ' address versions other than 2, 3, or 4.' 
- ) - - if not keyfileSecure: - fixSensitiveFilePermissions(os.path.join( - state.appdata, 'keys.dat'), hasEnabledKeys) - - -def reloadBroadcastSendersForWhichImWatching(): - """ - Reinitialize runtime data for the broadcasts I'm subscribed to - from the config file - """ - broadcastSendersForWhichImWatching.clear() - MyECSubscriptionCryptorObjects.clear() - queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1') - logger.debug('reloading subscriptions...') - for row in queryreturn: - address, = row - # status - addressVersionNumber, streamNumber, hashobj = decodeAddress(address)[1:] - if addressVersionNumber == 2: - broadcastSendersForWhichImWatching[hashobj] = 0 - # Now, for all addresses, even version 2 addresses, - # we should create Cryptor objects in a dictionary which we will - # use to attempt to decrypt encrypted broadcast messages. - - if addressVersionNumber <= 3: - privEncryptionKey = hashlib.sha512( - encodeVarint(addressVersionNumber) - + encodeVarint(streamNumber) + hashobj - ).digest()[:32] - MyECSubscriptionCryptorObjects[hashobj] = \ - highlevelcrypto.makeCryptor(hexlify(privEncryptionKey)) - else: - doubleHashOfAddressData = hashlib.sha512(hashlib.sha512( - encodeVarint(addressVersionNumber) - + encodeVarint(streamNumber) + hashobj - ).digest()).digest() - tag = doubleHashOfAddressData[32:] - privEncryptionKey = doubleHashOfAddressData[:32] - MyECSubscriptionCryptorObjects[tag] = \ - highlevelcrypto.makeCryptor(hexlify(privEncryptionKey)) - - -def fixPotentiallyInvalidUTF8Data(text): - """Sanitise invalid UTF-8 strings""" - try: - text.decode('utf-8') - return text - except UnicodeDecodeError: - return 'Part of the message is corrupt. The message cannot be' \ - ' displayed the normal way.\n\n' + repr(text) - - -def checkSensitiveFilePermissions(filename): - """ - :param str filename: path to the file - :return: True if file appears to have appropriate permissions. - """ - if sys.platform == 'win32': - # .. 
todo:: This might deserve extra checks by someone familiar with - # Windows systems. - return True - elif sys.platform[:7] == 'freebsd': - # FreeBSD file systems are the same as major Linux file systems - present_permissions = os.stat(filename)[0] - disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO - return present_permissions & disallowed_permissions == 0 - try: - # Skip known problems for non-Win32 filesystems - # without POSIX permissions. - fstype = subprocess.check_output( - 'stat -f -c "%%T" %s' % (filename), - shell=True, - stderr=subprocess.STDOUT - ) - if 'fuseblk' in fstype: - logger.info( - 'Skipping file permissions check for %s.' - ' Filesystem fuseblk detected.', filename) - return True - except: # noqa:E722 - # Swallow exception here, but we might run into trouble later! - logger.error('Could not determine filesystem type. %s', filename) - present_permissions = os.stat(filename)[0] - disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO - return present_permissions & disallowed_permissions == 0 - - -# Fixes permissions on a sensitive file. -def fixSensitiveFilePermissions(filename, hasEnabledKeys): - """Try to change file permissions to be more restrictive""" - if hasEnabledKeys: - logger.warning( - 'Keyfile had insecure permissions, and there were enabled' - ' keys. The truly paranoid should stop using them immediately.') - else: - logger.warning( - 'Keyfile had insecure permissions, but there were no enabled keys.' 
- ) - try: - present_permissions = os.stat(filename)[0] - disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO - allowed_permissions = ((1 << 32) - 1) ^ disallowed_permissions - new_permissions = ( - allowed_permissions & present_permissions) - os.chmod(filename, new_permissions) - - logger.info('Keyfile permissions automatically fixed.') - - except Exception: - logger.exception('Keyfile permissions could not be fixed.') - raise diff --git a/src/tests/mock/pybitmessage/shutdown.py b/src/tests/mock/pybitmessage/shutdown.py deleted file mode 100644 index 3e2b8ca8..00000000 --- a/src/tests/mock/pybitmessage/shutdown.py +++ /dev/null @@ -1,91 +0,0 @@ -"""shutdown function""" - -import os -import threading -import time - -from six.moves import queue - -import state -from debug import logger -from helper_sql import sqlQuery, sqlStoredProcedure -from inventory import Inventory -from network import StoppableThread -from network.knownnodes import saveKnownNodes -from queues import ( - addressGeneratorQueue, objectProcessorQueue, UISignalQueue, workerQueue) - - -def doCleanShutdown(): - """ - Used to tell all the treads to finish work and exit. - """ - state.shutdown = 1 - - objectProcessorQueue.put(('checkShutdownVariable', 'no data')) - for thread in threading.enumerate(): - if thread.isAlive() and isinstance(thread, StoppableThread): - thread.stopThread() - - UISignalQueue.put(( - 'updateStatusBar', - 'Saving the knownNodes list of peers to disk...')) - logger.info('Saving knownNodes list of peers to disk') - saveKnownNodes() - logger.info('Done saving knownNodes list of peers to disk') - UISignalQueue.put(( - 'updateStatusBar', - 'Done saving the knownNodes list of peers to disk.')) - logger.info('Flushing inventory in memory out to disk...') - UISignalQueue.put(( - 'updateStatusBar', - 'Flushing inventory in memory out to disk.' - ' This should normally only take a second...')) - Inventory().flush() - - # Verify that the objectProcessor has finished exiting. 
It should have - # incremented the shutdown variable from 1 to 2. This must finish before - # we command the sqlThread to exit. - while state.shutdown == 1: - time.sleep(.1) - - # Wait long enough to guarantee that any running proof of work worker - # threads will check the shutdown variable and exit. If the main thread - # closes before they do then they won't stop. - time.sleep(.25) - - for thread in threading.enumerate(): - if ( - thread is not threading.currentThread() - and isinstance(thread, StoppableThread) - and thread.name != 'SQL' - ): - logger.debug("Waiting for thread %s", thread.name) - thread.join() - - # This one last useless query will guarantee that the previous flush - # committed and that the - # objectProcessorThread committed before we close the program. - sqlQuery('SELECT address FROM subscriptions') - logger.info('Finished flushing inventory.') - sqlStoredProcedure('exit') - - # flush queues - for q in ( - workerQueue, UISignalQueue, addressGeneratorQueue, - objectProcessorQueue): - while True: - try: - q.get(False) - q.task_done() - except queue.Empty: - break - - if state.thisapp.daemon or not state.enableGUI: - logger.info('Clean shutdown complete.') - state.thisapp.cleanup() - os._exit(0) # pylint: disable=protected-access - else: - logger.info('Core shutdown complete.') - for thread in threading.enumerate(): - logger.debug('Thread %s still running', thread.name) diff --git a/src/tests/mock/pybitmessage/singleinstance.py b/src/tests/mock/pybitmessage/singleinstance.py deleted file mode 100644 index 660dcf54..00000000 --- a/src/tests/mock/pybitmessage/singleinstance.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -This is based upon the singleton class from -`tendo `_ -which is under the Python Software Foundation License version 2 -""" - -import atexit -import os -import sys - -import state - -try: - import fcntl # @UnresolvedImport -except ImportError: - pass - - -class singleinstance(object): - """ - Implements a single instance application by 
creating a lock file - at appdata. - """ - def __init__(self, flavor_id="", daemon=False): - self.initialized = False - self.counter = 0 - self.daemon = daemon - self.lockPid = None - self.lockfile = os.path.normpath( - os.path.join(state.appdata, 'singleton%s.lock' % flavor_id)) - - if state.enableGUI and not self.daemon and not state.curses: - # Tells the already running (if any) application to get focus. - import bitmessageqt - bitmessageqt.init() - - self.lock() - - self.initialized = True - atexit.register(self.cleanup) - - def lock(self): - """Obtain single instance lock""" - if self.lockPid is None: - self.lockPid = os.getpid() - if sys.platform == 'win32': - try: - # file already exists, we try to remove - # (in case previous execution was interrupted) - if os.path.exists(self.lockfile): - os.unlink(self.lockfile) - self.fd = os.open( - self.lockfile, - os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC - ) - except OSError as e: - if e.errno == 13: - sys.exit( - 'Another instance of this application is' - ' already running') - raise - else: - pidLine = "%i\n" % self.lockPid - os.write(self.fd, pidLine) - else: # non Windows - self.fp = open(self.lockfile, 'a+') - try: - if self.daemon and self.lockPid != os.getpid(): - # wait for parent to finish - fcntl.lockf(self.fp, fcntl.LOCK_EX) - else: - fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB) - self.lockPid = os.getpid() - except IOError: - sys.exit( - 'Another instance of this application is' - ' already running') - else: - pidLine = "%i\n" % self.lockPid - self.fp.truncate(0) - self.fp.write(pidLine) - self.fp.flush() - - def cleanup(self): - """Release single instance lock""" - if not self.initialized: - return - if self.daemon and self.lockPid == os.getpid(): - # these are the two initial forks while daemonizing - try: - if sys.platform == 'win32': - if hasattr(self, 'fd'): - os.close(self.fd) - else: - fcntl.lockf(self.fp, fcntl.LOCK_UN) - except Exception: - pass - - return - - try: - if sys.platform 
== 'win32': - if hasattr(self, 'fd'): - os.close(self.fd) - os.unlink(self.lockfile) - else: - fcntl.lockf(self.fp, fcntl.LOCK_UN) - if os.path.isfile(self.lockfile): - os.unlink(self.lockfile) - except Exception: - pass diff --git a/src/tests/mock/pybitmessage/testmode_init.py b/src/tests/mock/pybitmessage/testmode_init.py deleted file mode 100644 index a088afc1..00000000 --- a/src/tests/mock/pybitmessage/testmode_init.py +++ /dev/null @@ -1,40 +0,0 @@ -import time -import uuid - -import helper_inbox -import helper_sql - -# from .tests.samples import sample_inbox_msg_ids, sample_deterministic_addr4 -sample_deterministic_addr4 = 'BM-2cWzSnwjJ7yRP3nLEWUV5LisTZyREWSzUK' -sample_inbox_msg_ids = ['27e644765a3e4b2e973ee7ccf958ea20', '51fc5531-3989-4d69-bbb5-68d64b756f5b', - '2c975c515f8b414db5eea60ba57ba455', 'bc1f2d8a-681c-4cc0-9a12-6067c7e1ac24'] - - -def populate_api_test_data(): - '''Adding test records in inbox table''' - helper_sql.sql_ready.wait() - - test1 = ( - sample_inbox_msg_ids[0], sample_deterministic_addr4, - sample_deterministic_addr4, 'Test1 subject', int(time.time()), - 'Test1 body', 'inbox', 2, 0, uuid.uuid4().bytes - ) - test2 = ( - sample_inbox_msg_ids[1], sample_deterministic_addr4, - sample_deterministic_addr4, 'Test2 subject', int(time.time()), - 'Test2 body', 'inbox', 2, 0, uuid.uuid4().bytes - ) - test3 = ( - sample_inbox_msg_ids[2], sample_deterministic_addr4, - sample_deterministic_addr4, 'Test3 subject', int(time.time()), - 'Test3 body', 'inbox', 2, 0, uuid.uuid4().bytes - ) - test4 = ( - sample_inbox_msg_ids[3], sample_deterministic_addr4, - sample_deterministic_addr4, 'Test4 subject', int(time.time()), - 'Test4 body', 'inbox', 2, 0, uuid.uuid4().bytes - ) - helper_inbox.insert(test1) - helper_inbox.insert(test2) - helper_inbox.insert(test3) - helper_inbox.insert(test4) diff --git a/src/tests/mock/pybitmessage/threads.py b/src/tests/mock/pybitmessage/threads.py index ac8bf7a6..336aae5f 100644 --- a/src/tests/mock/pybitmessage/threads.py +++ 
b/src/tests/mock/pybitmessage/threads.py @@ -1,48 +1,33 @@ -""" -PyBitmessage does various tasks in separate threads. Most of them inherit -from `.network.StoppableThread`. There are `addressGenerator` for -addresses generation, `objectProcessor` for processing the network objects -passed minimal validation, `singleCleaner` to periodically clean various -internal storages (like inventory and knownnodes) and do forced garbage -collection, `singleWorker` for doing PoW, `sqlThread` for querying sqlite -database. - -There are also other threads in the `.network` package. - -:func:`set_thread_name` is defined here for the threads that don't inherit from -:class:`.network.StoppableThread` -""" +"""Threading primitives for the network package""" +import logging +import random import threading -from class_addressGenerator import addressGenerator -from class_objectProcessor import objectProcessor -from class_singleCleaner import singleCleaner -from class_singleWorker import singleWorker -from class_sqlThread import sqlThread -try: - import prctl -except ImportError: - def set_thread_name(name): - """Set a name for the thread for python internal use.""" - threading.current_thread().name = name -else: - def set_thread_name(name): - """Set the thread name for external use (visible from the OS).""" - prctl.set_name(name) +class StoppableThread(threading.Thread): + """Base class for application threads with stopThread method""" + name = None + logger = logging.getLogger('default') - def _thread_name_hack(self): - set_thread_name(self.name) - threading.Thread.__bootstrap_original__(self) - # pylint: disable=protected-access - threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap - threading.Thread._Thread__bootstrap = _thread_name_hack + def __init__(self, name=None): + if name: + self.name = name + super(StoppableThread, self).__init__(name=self.name) + self.stop = threading.Event() + self._stopped = False + random.seed() + self.logger.info('Init thread 
%s', self.name) + + def stopThread(self): + """Stop the thread""" + self._stopped = True + self.stop.set() -printLock = threading.Lock() - -__all__ = [ - "addressGenerator", "objectProcessor", "singleCleaner", "singleWorker", - "sqlThread", "printLock" -] +class BusyError(threading.ThreadError): + """ + Thread error raised when another connection holds the lock + we are trying to acquire. + """ + pass diff --git a/src/tests/mock/pybitmessage/tr.py b/src/tests/mock/pybitmessage/tr.py deleted file mode 100644 index eec82c37..00000000 --- a/src/tests/mock/pybitmessage/tr.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Translating text -""" -import os - -try: - import state -except ImportError: - from . import state - - -class translateClass: - """ - This is used so that the translateText function can be used - when we are in daemon mode and not using any QT functions. - """ - # pylint: disable=old-style-class,too-few-public-methods - def __init__(self, context, text): - self.context = context - self.text = text - - def arg(self, _): - """Replace argument placeholders""" - if '%' in self.text: - # This doesn't actually do anything with the arguments - # because we don't have a UI in which to display this information anyway. - return translateClass(self.context, self.text.replace('%', '', 1)) - return self.text - - -def _translate(context, text, disambiguation=None, encoding=None, n=None): - # pylint: disable=unused-argument - return translateText(context, text, n) - - -def translateText(context, text, n=None): - """Translate text in context""" - try: - enableGUI = state.enableGUI - except AttributeError: # inside the plugin - enableGUI = True - if enableGUI: - try: - from PyQt4 import QtCore, QtGui - except Exception as err: - print('PyBitmessage requires PyQt unless you want to run it as a daemon' - ' and interact with it using the API.' - ' You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download' - ' or by searching Google for \'PyQt Download\'.' 
- ' If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon') - print('Error message:', err) - os._exit(0) # pylint: disable=protected-access - if n is None: - return QtGui.QApplication.translate(context, text) - return QtGui.QApplication.translate(context, text, None, QtCore.QCoreApplication.CodecForTr, n) - else: - if '%' in text: - return translateClass(context, text.replace('%', '', 1)) - return text diff --git a/src/tests/mock/pybitmessage/upnp.py b/src/tests/mock/pybitmessage/upnp.py deleted file mode 100644 index c6db487b..00000000 --- a/src/tests/mock/pybitmessage/upnp.py +++ /dev/null @@ -1,348 +0,0 @@ -# pylint: disable=too-many-statements,too-many-branches,protected-access,no-self-use -""" -Complete UPnP port forwarding implementation in separate thread. -Reference: http://mattscodecave.com/posts/using-python-and-upnp-to-forward-a-port -""" - -import httplib -import socket -import time -import urllib2 -from random import randint -from urlparse import urlparse -from xml.dom.minidom import Document, parseString - -import queues -import state -import tr -from bmconfigparser import BMConfigParser -from debug import logger -from network import BMConnectionPool, knownnodes, StoppableThread -from network.node import Peer - - -def createRequestXML(service, action, arguments=None): - """Router UPnP requests are XML formatted""" - - doc = Document() - - # create the envelope element and set its attributes - envelope = doc.createElementNS('', 's:Envelope') - envelope.setAttribute('xmlns:s', 'http://schemas.xmlsoap.org/soap/envelope/') - envelope.setAttribute('s:encodingStyle', 'http://schemas.xmlsoap.org/soap/encoding/') - - # create the body element - body = doc.createElementNS('', 's:Body') - - # create the function element and set its attribute - fn = doc.createElementNS('', 'u:%s' % action) - fn.setAttribute('xmlns:u', 'urn:schemas-upnp-org:service:%s' % service) - - # setup the argument element names and values - # using a list of tuples to 
preserve order - - # container for created nodes - argument_list = [] - - # iterate over arguments, create nodes, create text nodes, - # append text nodes to nodes, and finally add the ready product - # to argument_list - if arguments is not None: - for k, v in arguments: - tmp_node = doc.createElement(k) - tmp_text_node = doc.createTextNode(v) - tmp_node.appendChild(tmp_text_node) - argument_list.append(tmp_node) - - # append the prepared argument nodes to the function element - for arg in argument_list: - fn.appendChild(arg) - - # append function element to the body element - body.appendChild(fn) - - # append body element to envelope element - envelope.appendChild(body) - - # append envelope element to document, making it the root element - doc.appendChild(envelope) - - # our tree is ready, conver it to a string - return doc.toxml() - - -class UPnPError(Exception): - """Handle a UPnP error""" - - def __init__(self, message): - super(UPnPError, self).__init__() - logger.error(message) - - -class Router: # pylint: disable=old-style-class - """Encapulate routing""" - name = "" - path = "" - address = None - routerPath = None - extPort = None - - def __init__(self, ssdpResponse, address): - - self.address = address - - row = ssdpResponse.split('\r\n') - header = {} - for i in range(1, len(row)): - part = row[i].split(': ') - if len(part) == 2: - header[part[0].lower()] = part[1] - - try: - self.routerPath = urlparse(header['location']) - if not self.routerPath or not hasattr(self.routerPath, "hostname"): - logger.error("UPnP: no hostname: %s", header['location']) - except KeyError: - logger.error("UPnP: missing location header") - - # get the profile xml file and read it into a variable - directory = urllib2.urlopen(header['location']).read() - - # create a DOM object that represents the `directory` document - dom = parseString(directory) - - self.name = dom.getElementsByTagName('friendlyName')[0].childNodes[0].data - # find all 'serviceType' elements - service_types 
= dom.getElementsByTagName('serviceType') - - for service in service_types: - if service.childNodes[0].data.find('WANIPConnection') > 0 or \ - service.childNodes[0].data.find('WANPPPConnection') > 0: - self.path = service.parentNode.getElementsByTagName('controlURL')[0].childNodes[0].data - self.upnp_schema = service.childNodes[0].data.split(':')[-2] - - def AddPortMapping( - self, - externalPort, - internalPort, - internalClient, - protocol, - description, - leaseDuration=0, - enabled=1, - ): # pylint: disable=too-many-arguments - """Add UPnP port mapping""" - - resp = self.soapRequest(self.upnp_schema + ':1', 'AddPortMapping', [ - ('NewRemoteHost', ''), - ('NewExternalPort', str(externalPort)), - ('NewProtocol', protocol), - ('NewInternalPort', str(internalPort)), - ('NewInternalClient', internalClient), - ('NewEnabled', str(enabled)), - ('NewPortMappingDescription', str(description)), - ('NewLeaseDuration', str(leaseDuration)) - ]) - self.extPort = externalPort - logger.info("Successfully established UPnP mapping for %s:%i on external port %i", - internalClient, internalPort, externalPort) - return resp - - def DeletePortMapping(self, externalPort, protocol): - """Delete UPnP port mapping""" - - resp = self.soapRequest(self.upnp_schema + ':1', 'DeletePortMapping', [ - ('NewRemoteHost', ''), - ('NewExternalPort', str(externalPort)), - ('NewProtocol', protocol), - ]) - logger.info("Removed UPnP mapping on external port %i", externalPort) - return resp - - def GetExternalIPAddress(self): - """Get the external address""" - - resp = self.soapRequest( - self.upnp_schema + ':1', 'GetExternalIPAddress') - dom = parseString(resp.read()) - return dom.getElementsByTagName( - 'NewExternalIPAddress')[0].childNodes[0].data - - def soapRequest(self, service, action, arguments=None): - """Make a request to a router""" - - conn = httplib.HTTPConnection(self.routerPath.hostname, self.routerPath.port) - conn.request( - 'POST', - self.path, - createRequestXML(service, action, 
arguments), - { - 'SOAPAction': '"urn:schemas-upnp-org:service:%s#%s"' % (service, action), - 'Content-Type': 'text/xml' - } - ) - resp = conn.getresponse() - conn.close() - if resp.status == 500: - respData = resp.read() - try: - dom = parseString(respData) - errinfo = dom.getElementsByTagName('errorDescription') - if errinfo: - logger.error("UPnP error: %s", respData) - raise UPnPError(errinfo[0].childNodes[0].data) - except: # noqa:E722 - raise UPnPError("Unable to parse SOAP error: %s" % (respData)) - return resp - - -class uPnPThread(StoppableThread): - """Start a thread to handle UPnP activity""" - - SSDP_ADDR = "239.255.255.250" - GOOGLE_DNS = "8.8.8.8" - SSDP_PORT = 1900 - SSDP_MX = 2 - SSDP_ST = "urn:schemas-upnp-org:device:InternetGatewayDevice:1" - - def __init__(self): - super(uPnPThread, self).__init__(name="uPnPThread") - self.extPort = BMConfigParser().safeGetInt('bitmessagesettings', 'extport', default=None) - self.localIP = self.getLocalIP() - self.routers = [] - self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - self.sock.bind((self.localIP, 0)) - self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2) - self.sock.settimeout(5) - self.sendSleep = 60 - - def run(self): - """Start the thread to manage UPnP activity""" - - logger.debug("Starting UPnP thread") - logger.debug("Local IP: %s", self.localIP) - lastSent = 0 - - # wait until asyncore binds so that we know the listening port - bound = False - while state.shutdown == 0 and not self._stopped and not bound: - for s in BMConnectionPool().listeningSockets.values(): - if s.is_bound(): - bound = True - if not bound: - time.sleep(1) - - # pylint: disable=attribute-defined-outside-init - self.localPort = BMConfigParser().getint('bitmessagesettings', 'port') - - while state.shutdown == 0 and BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp'): - if time.time() - lastSent > self.sendSleep and not self.routers: - try: - self.sendSearchRouter() - except: # noqa:E722 - 
pass - lastSent = time.time() - try: - while state.shutdown == 0 and BMConfigParser().safeGetBoolean('bitmessagesettings', 'upnp'): - resp, (ip, _) = self.sock.recvfrom(1000) - if resp is None: - continue - newRouter = Router(resp, ip) - for router in self.routers: - if router.routerPath == newRouter.routerPath: - break - else: - logger.debug("Found UPnP router at %s", ip) - self.routers.append(newRouter) - self.createPortMapping(newRouter) - try: - self_peer = Peer( - newRouter.GetExternalIPAddress(), - self.extPort - ) - except: # noqa:E722 - logger.debug('Failed to get external IP') - else: - with knownnodes.knownNodesLock: - knownnodes.addKnownNode( - 1, self_peer, is_self=True) - queues.UISignalQueue.put(('updateStatusBar', tr._translate( - "MainWindow", 'UPnP port mapping established on port %1' - ).arg(str(self.extPort)))) - break - except socket.timeout: - pass - except: # noqa:E722 - logger.error("Failure running UPnP router search.", exc_info=True) - for router in self.routers: - if router.extPort is None: - self.createPortMapping(router) - try: - self.sock.shutdown(socket.SHUT_RDWR) - except: # noqa:E722 - pass - try: - self.sock.close() - except: # noqa:E722 - pass - deleted = False - for router in self.routers: - if router.extPort is not None: - deleted = True - self.deletePortMapping(router) - if deleted: - queues.UISignalQueue.put(('updateStatusBar', tr._translate("MainWindow", 'UPnP port mapping removed'))) - logger.debug("UPnP thread done") - - def getLocalIP(self): - """Get the local IP of the node""" - - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - s.connect((uPnPThread.GOOGLE_DNS, 1)) - return s.getsockname()[0] - - def sendSearchRouter(self): - """Querying for UPnP services""" - - ssdpRequest = "M-SEARCH * HTTP/1.1\r\n" + \ - "HOST: %s:%d\r\n" % (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT) + \ - "MAN: \"ssdp:discover\"\r\n" + \ - "MX: %d\r\n" % (uPnPThread.SSDP_MX, ) + \ - "ST: 
%s\r\n" % (uPnPThread.SSDP_ST, ) + "\r\n" - - try: - logger.debug("Sending UPnP query") - self.sock.sendto(ssdpRequest, (uPnPThread.SSDP_ADDR, uPnPThread.SSDP_PORT)) - except: # noqa:E722 - logger.exception("UPnP send query failed") - - def createPortMapping(self, router): - """Add a port mapping""" - - for i in range(50): - try: - localIP = self.localIP - if i == 0: - extPort = self.localPort # try same port first - elif i == 1 and self.extPort: - extPort = self.extPort # try external port from last time next - else: - extPort = randint(32767, 65535) - logger.debug( - "Attempt %i, requesting UPnP mapping for %s:%i on external port %i", - i, - localIP, - self.localPort, - extPort) - router.AddPortMapping(extPort, self.localPort, localIP, 'TCP', 'BitMessage') - self.extPort = extPort - BMConfigParser().set('bitmessagesettings', 'extport', str(extPort)) - BMConfigParser().save() - break - except UPnPError: - logger.debug("UPnP error: ", exc_info=True) - - def deletePortMapping(self, router): - """Delete a port mapping""" - router.DeletePortMapping(router.extPort, 'TCP') diff --git a/src/tests/mock/pybitmessage/version.py b/src/tests/mock/pybitmessage/version.py deleted file mode 100644 index 076b8c56..00000000 --- a/src/tests/mock/pybitmessage/version.py +++ /dev/null @@ -1,2 +0,0 @@ -softwareName = 'PyBitmessage' -softwareVersion = '0.6.3.2' -- 2.47.2 From f614cb5862476dd2379652bc4448d02be176d04f Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Fri, 17 Dec 2021 17:09:38 +0530 Subject: [PATCH 08/10] Fixed kivy import errors & tested the hello world application popup --- src/tests/mock/kivy_main.py | 6 +++--- src/tests/mock/pybitmessage/mpybit.py | 28 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) create mode 100644 src/tests/mock/pybitmessage/mpybit.py diff --git a/src/tests/mock/kivy_main.py b/src/tests/mock/kivy_main.py index badc1dc1..9c68f3af 100644 --- a/src/tests/mock/kivy_main.py +++ b/src/tests/mock/kivy_main.py @@ 
-1,14 +1,14 @@ """Mock kivy app with mock threads.""" from pybitmessage import state -from pybitmessage.bitmessagekivy.mpybit import NavigateApp -from class_addressGenerator import FakeAddressGenerator +from pybitmessage.mpybit import NavigateApp +from pybitmessage.class_addressGenerator import addressGenerator def main(): """main method for starting threads""" # Start the address generation thread - addressGeneratorThread = FakeAddressGenerator() + addressGeneratorThread = addressGenerator() # close the main program even if there are threads left addressGeneratorThread.daemon = True addressGeneratorThread.start() diff --git a/src/tests/mock/pybitmessage/mpybit.py b/src/tests/mock/pybitmessage/mpybit.py new file mode 100644 index 00000000..b44b1070 --- /dev/null +++ b/src/tests/mock/pybitmessage/mpybit.py @@ -0,0 +1,28 @@ +""" + Dummy implementation for kivy Desktop and android(mobile) interface +""" +# pylint: disable=too-few-public-methods + +from kivy.app import App +from kivy.uix.label import Label + + +class NavigateApp(App): + """Navigation Layout of class""" + + def build(self): + """Method builds the widget""" + # pylint: disable=no-self-use + return Label(text="Hello World !") + + def clickNavDrawer(self): + """method for clicking navigation drawer""" + pass + + def addingtoaddressbook(self): + """method for clicking address book popup""" + pass + + +if __name__ == '__main__': + NavigateApp().run() -- 2.47.2 From a9fb41821a0e562dcea831899b9abc3c1290e343 Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Fri, 17 Dec 2021 17:26:18 +0530 Subject: [PATCH 09/10] Fixed code quality --- src/tests/mock/bitmessagemock.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 397e77f8..7b6851be 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -1,3 +1,6 @@ +""" +Bitmessage mock +""" from pybitmessage.class_addressGenerator 
import addressGenerator from pybitmessage.class_singleWorker import singleWorker from pybitmessage.class_objectProcessor import objectProcessor @@ -5,12 +8,14 @@ from pybitmessage.inventory import Inventory from pybitmessage.bmconfigparser import BMConfigParser -class MockMain(): +# pylint: disable=too-few-public-methods +class MockMain: """Mock main function""" + # pylint: disable=no-self-use def start(self): """Start main application""" - # pylint: disable=too-many-statements,too-many-branches,too-many-locals + # pylint: disable=too-many-statements,too-many-branches,too-many-locals, unused-variable config = BMConfigParser() daemon = config.safeGetBoolean('bitmessagesettings', 'daemon') -- 2.47.2 From fbfbc560c6270f6b4bfee4a5b58c4b5ec695e3c0 Mon Sep 17 00:00:00 2001 From: "kuldeep.k@cisinlabs.com" Date: Fri, 17 Dec 2021 19:04:10 +0530 Subject: [PATCH 10/10] Fixed pylint --- src/tests/mock/bitmessagemock.py | 2 +- src/tests/mock/pybitmessage/bmconfigparser.py | 2 ++ src/tests/mock/pybitmessage/class_addressGenerator.py | 9 ++------- src/tests/mock/pybitmessage/class_singleWorker.py | 6 +++--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/tests/mock/bitmessagemock.py b/src/tests/mock/bitmessagemock.py index 7b6851be..c58c9c88 100644 --- a/src/tests/mock/bitmessagemock.py +++ b/src/tests/mock/bitmessagemock.py @@ -8,7 +8,7 @@ from pybitmessage.inventory import Inventory from pybitmessage.bmconfigparser import BMConfigParser -# pylint: disable=too-few-public-methods +# pylint: disable=too-few-public-methods,no-init,old-style-class class MockMain: """Mock main function""" diff --git a/src/tests/mock/pybitmessage/bmconfigparser.py b/src/tests/mock/pybitmessage/bmconfigparser.py index 4798dda4..efeac69b 100644 --- a/src/tests/mock/pybitmessage/bmconfigparser.py +++ b/src/tests/mock/pybitmessage/bmconfigparser.py @@ -69,6 +69,7 @@ class BMConfigParser(SafeConfigParser): raise ValueError("Invalid value %s" % value) return SafeConfigParser.set(self, 
section, option, value) + # pylint: disable=redefined-builtinm, too-many-return-statements def get(self, section, option, raw=False, vars=None): if sys.version_info[0] == 3: # pylint: disable=arguments-differ @@ -185,6 +186,7 @@ class BMConfigParser(SafeConfigParser): for section in self.sections(): for option in self.options(section): try: + # pylint: disable=unsubscriptable-object if not self.validate( section, option, self[section][option] diff --git a/src/tests/mock/pybitmessage/class_addressGenerator.py b/src/tests/mock/pybitmessage/class_addressGenerator.py index b86e9278..e5f59675 100644 --- a/src/tests/mock/pybitmessage/class_addressGenerator.py +++ b/src/tests/mock/pybitmessage/class_addressGenerator.py @@ -2,10 +2,6 @@ A thread for creating addresses """ -import logging -import random -import threading - from six.moves import queue from pybitmessage import state @@ -46,19 +42,18 @@ class addressGenerator(StoppableThread): address_list = list(fake_addresses.keys()) def stopThread(self): - + """"To stop address generator thread""" try: queues.addressGeneratorQueue.put(("stopThread", "data")) except queue.Full: self.logger.warning('addressGeneratorQueue is Full') - super(FakeAddressGenerator, self).stopThread() + super(addressGenerator, self).stopThread() def run(self): """ Process the requests for addresses generation from `.queues.addressGeneratorQueue` """ - import pdb;pdb.set_trace() while state.shutdown == 0: queueValue = queues.addressGeneratorQueue.get() try: diff --git a/src/tests/mock/pybitmessage/class_singleWorker.py b/src/tests/mock/pybitmessage/class_singleWorker.py index 924db8eb..617e835f 100644 --- a/src/tests/mock/pybitmessage/class_singleWorker.py +++ b/src/tests/mock/pybitmessage/class_singleWorker.py @@ -4,11 +4,11 @@ Thread for performing PoW from __future__ import division +from six.moves import queue + from pybitmessage import state from pybitmessage import queues - from pybitmessage.threads import StoppableThread -from six.moves 
import queue class singleWorker(StoppableThread): @@ -28,7 +28,7 @@ class singleWorker(StoppableThread): super(singleWorker, self).stopThread() def run(self): - + """To run single worker thread""" if state.shutdown > 0: return -- 2.47.2