Fix broken tests #1683
|
@ -7,7 +7,7 @@ import os
|
|||
import shutil
|
||||
from datetime import datetime
|
||||
|
||||
import state
|
||||
from paths import get_active_config_folder
|
||||
from singleton import Singleton
|
||||
|
||||
BMConfigDefaults = {
|
||||
|
@ -124,7 +124,16 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
return [
|
||||
x for x in BMConfigParser().sections() if x.startswith('BM-')]
|
||||
|
||||
def _reset(self):
|
||||
"""Reset current config. There doesn't appear to be a built in
|
||||
method for this"""
|
||||
sections = self.sections()
|
||||
for x in sections:
|
||||
self.remove_section(x)
|
||||
|
||||
def read(self, filenames):
|
||||
"""Read config and populate defaults"""
|
||||
self._reset()
|
||||
ConfigParser.ConfigParser.read(self, filenames)
|
||||
for section in self.sections():
|
||||
for option in self.options(section):
|
||||
|
@ -144,7 +153,7 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
|
||||
def save(self):
|
||||
"""Save the runtime config onto the filesystem"""
|
||||
fileName = os.path.join(state.appdata, 'keys.dat')
|
||||
fileName = os.path.join(get_active_config_folder(), 'keys.dat')
|
||||
fileNameBak = '.'.join([
|
||||
fileName, datetime.now().strftime("%Y%j%H%M%S%f"), 'bak'])
|
||||
# create a backup copy to prevent the accidental loss due to
|
||||
|
|
|
@ -28,7 +28,9 @@ class sqlThread(threading.Thread):
|
|||
|
||||
def run(self): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
|
||||
"""Process SQL queries from `.helper_sql.sqlSubmitQueue`"""
|
||||
self.conn = sqlite3.connect(state.appdata + 'messages.dat')
|
||||
self.conn = sqlite3.connect(
|
||||
os.path.join(paths.get_active_config_folder(),
|
||||
'messages.dat'))
|
||||
self.conn.text_factory = str
|
||||
self.cur = self.conn.cursor()
|
||||
|
||||
|
|
18
src/debug.py
|
@ -42,7 +42,8 @@ import os
|
|||
import sys
|
||||
|
||||
import helper_startup
|
||||
import state
|
||||
|
||||
from paths import get_active_config_folder
|
||||
|
||||
helper_startup.loadConfig()
|
||||
|
||||
|
@ -67,7 +68,7 @@ def configureLogging():
|
|||
sys.excepthook = log_uncaught_exceptions
|
||||
fail_msg = ''
|
||||
try:
|
||||
logging_config = os.path.join(state.appdata, 'logging.dat')
|
||||
logging_config = os.path.join(get_active_config_folder(), 'logging.dat')
|
||||
logging.config.fileConfig(
|
||||
logging_config, disable_existing_loggers=False)
|
||||
return (
|
||||
|
@ -103,7 +104,7 @@ def configureLogging():
|
|||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'formatter': 'default',
|
||||
'level': log_level,
|
||||
'filename': os.path.join(state.appdata, 'debug.log'),
|
||||
'filename': os.path.join(get_active_config_folder(), 'debug.log'),
|
||||
'maxBytes': 2097152, # 2 MiB
|
||||
'backupCount': 1,
|
||||
'encoding': 'UTF-8',
|
||||
|
@ -140,14 +141,23 @@ def resetLogging():
|
|||
"""Reconfigure logging in runtime when state.appdata dir changed"""
|
||||
# pylint: disable=global-statement, used-before-assignment
|
||||
global logger
|
||||
for i in logger.handlers:
|
||||
for i in list(logger.handlers):
|
||||
try:
|
||||
logger.removeHandler(i)
|
||||
except ValueError:
|
||||
pass
|
||||
i.flush()
|
||||
i.close()
|
||||
configureLogging()
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
|
||||
def flush_logs():
|
||||
"""Tell handlers to flush"""
|
||||
for i in list(logger.handlers):
|
||||
i.flush()
|
||||
|
||||
|
||||
# !
|
||||
preconfigured, msg = configureLogging()
|
||||
logger = logging.getLogger('default')
|
||||
|
|
|
@ -16,6 +16,7 @@ except ImportError:
|
|||
import state
|
||||
from bmconfigparser import BMConfigParser
|
||||
from network.node import Peer
|
||||
from paths import get_active_config_folder
|
||||
|
||||
knownNodesLock = threading.RLock()
|
||||
"""Thread lock for knownnodes modification"""
|
||||
|
@ -91,7 +92,7 @@ def pickle_deserialize_old_knownnodes(source):
|
|||
def saveKnownNodes(dirName=None):
|
||||
"""Save knownnodes to filesystem"""
|
||||
if dirName is None:
|
||||
dirName = state.appdata
|
||||
dirName = get_active_config_folder()
|
||||
with knownNodesLock:
|
||||
with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output:
|
||||
json_serialize_knownnodes(output)
|
||||
|
@ -149,7 +150,9 @@ def createDefaultKnownNodes():
|
|||
def readKnownNodes():
|
||||
"""Load knownnodes from filesystem"""
|
||||
try:
|
||||
with open(state.appdata + 'knownnodes.dat', 'rb') as source:
|
||||
with open(os.path.join(get_active_config_folder(),
|
||||
'knownnodes.dat'),
|
||||
'rb') as source:
|
||||
with knownNodesLock:
|
||||
try:
|
||||
json_deserialize_knownnodes(source)
|
||||
|
|
24
src/paths.py
|
@ -8,6 +8,8 @@ import sys
|
|||
from datetime import datetime
|
||||
from shutil import move
|
||||
|
||||
import state
|
||||
|
||||
logger = logging.getLogger('default')
|
||||
|
||||
# When using py2exe or py2app, the variable frozen is added to the sys
|
||||
|
@ -31,6 +33,28 @@ def lookupExeFolder():
|
|||
return exeFolder
|
||||
|
||||
|
||||
def set_appdata_folder(path=None):
|
||||
"""Changes appdata location. Does not move any files, just where
|
||||
where it's looking when loading/saving later"""
|
||||
if not path:
|
||||
try:
|
||||
# default
|
||||
del os.environ['BITMESSAGE_HOME']
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
os.environ['BITMESSAGE_HOME'] = path
|
||||
|
||||
state.appdata = lookupAppdataFolder()
|
||||
return state.appdata
|
||||
|
||||
|
||||
def get_active_config_folder():
|
||||
"""Returns current config directory, workaround for inconsistent
|
||||
imports"""
|
||||
return state.appdata
|
||||
|
||||
|
||||
def lookupAppdataFolder():
|
||||
"""Returns path of the folder where application data is stored"""
|
||||
APPNAME = "PyBitmessage"
|
||||
|
|
|
@ -17,11 +17,11 @@ from binascii import hexlify
|
|||
|
||||
# Project imports.
|
||||
import highlevelcrypto
|
||||
import state
|
||||
from addresses import decodeAddress, encodeVarint
|
||||
from bmconfigparser import BMConfigParser
|
||||
from debug import logger
|
||||
from helper_sql import sqlQuery
|
||||
from paths import get_active_config_folder
|
||||
|
||||
from pyelliptic import arithmetic
|
||||
|
||||
|
@ -114,7 +114,7 @@ def reloadMyAddressHashes():
|
|||
# myPrivateKeys.clear()
|
||||
|
||||
keyfileSecure = checkSensitiveFilePermissions(os.path.join(
|
||||
state.appdata, 'keys.dat'))
|
||||
get_active_config_folder(), 'keys.dat'))
|
||||
hasEnabledKeys = False
|
||||
for addressInKeysFile in BMConfigParser().addresses():
|
||||
isEnabled = BMConfigParser().getboolean(addressInKeysFile, 'enabled')
|
||||
|
@ -144,7 +144,7 @@ def reloadMyAddressHashes():
|
|||
|
||||
if not keyfileSecure:
|
||||
fixSensitiveFilePermissions(os.path.join(
|
||||
state.appdata, 'keys.dat'), hasEnabledKeys)
|
||||
get_active_config_folder(), 'keys.dat'), hasEnabledKeys)
|
||||
|
||||
|
||||
def reloadBroadcastSendersForWhichImWatching():
|
||||
|
|
|
@ -45,6 +45,7 @@ class TestProcessConfig(TestProcessProto):
|
|||
def test_config_defaults(self):
|
||||
"""Test settings in the generated config"""
|
||||
self._stop_process()
|
||||
self._kill_process()
|
||||
|
||||
config = BMConfigParser()
|
||||
config.read(os.path.join(self.home, 'keys.dat'))
|
||||
|
||||
|
|
|
@ -7,6 +7,8 @@ import os
|
|||
import tempfile
|
||||
import unittest
|
||||
|
||||
from pybitmessage.paths import set_appdata_folder
|
||||
|
||||
|
||||
class TestLogger(unittest.TestCase):
|
||||
"""A test case for bmconfigparser"""
|
||||
|
@ -35,11 +37,14 @@ level=DEBUG
|
|||
handlers=default
|
||||
'''
|
||||
I don't understand the purpose; it's not used in code:
```shell
$ grep -r 'getLogger' src | grep root
src/tests/test_logger.py: logger2 = logging.getLogger('root')
```
Why only on second pass?
I think it may be a leftover from debugging; perhaps it isn't necessary anymore.
I don't understand the logic flow of the passes; this may be a leftover from debugging and perhaps isn't necessary anymore.
This does not fix the test. I'll describe how it works, how it worked, and how I wanted it to work a bit later.
I'll look at it.
> I don't understand the logic flow of the passes, this may be a leftover from debugging and perhaps isn't necessary anymore.
A proper test should not rely on other tests side effects.
What I wanted to demonstrate in this particular test:
1. when you import `logger` from `debug` it sets up the logging based on `logging.dat` config
1. `logger = logging.getLogger('default')` set after the import will be the same as in `debug`
1. if you call `resetLogging()` the configuration will be reapplied
All that works only after the first import of `debug` because it calls `helper_startup.loadConfig()`.
How it worked before:
1. precondition: there is no other test importing `debug` or other module inducing config setup or appdata change
1. first pass detected by `debug.log` in appdata remaining from core tests
1. on the second pass you need `resetLogging()`
You can reveal it by replacing all `tempfile.gettempdir()` by `tempfile.mkdtemp()` as was suggested before.
I'm not sure that I can precisely reconstruct what's happening now. It places `logging.dat`, which is used by subsequent runs of the pybitmessage process and can be detected on the second pass.
Maybe subclass this test case from `TestProcessProto` and check only the presence of the pattern (severity and initial message)? Then we can replace `logging.getLogger('default')` by the conventional `logging.getLogger(__name__)`, compose a proper `logging.dat`, and check the presence of all submodule names which write logs.
> first pass detected by `debug.log` in appdata remaining from core tests

~~Only if it uses the same config directory. But the tests change the location of the config directory. I think this breaks the logic. You can see now that I reverted to your original logic in `test_logger.py`, and it fails on travis-ci.org. I don't want to merge until this is resolved.~~

Oh, it worked. So maybe I don't understand it correctly.
I wrote before that my original logic was wrong. Standalone tests should not import any module with side effects.
I debugged my process-based variant; it succeeds only on Linux: https://travis-ci.org/github/g1itch/PyBitmessage/builds/740134631
|
||||
|
||||
def test_fileConfig(self):
|
||||
def test_fileConfig(self): # pylint: disable=invalid-name
|
||||
"""Put logging.dat with special pattern and check it was used"""
|
||||
tmp = os.environ['BITMESSAGE_HOME'] = tempfile.gettempdir()
|
||||
# FIXME: this doesn't always work, probably due to module
|
||||
# load order. Workaround through .travis.yml
|
||||
tmp = set_appdata_folder(tempfile.gettempdir())
|
||||
log_config = os.path.join(tmp, 'logging.dat')
|
||||
log_file = os.path.join(tmp, 'debug.log')
|
||||
pass_ = 1
|
||||
|
||||
def gen_log_config(pattern):
|
||||
"""A small closure to generate logging.dat with custom pattern"""
|
||||
|
@ -48,11 +53,14 @@ handlers=default
|
|||
|
||||
pattern = r' o_0 '
|
||||
gen_log_config(pattern)
|
||||
pass_ = 1
|
||||
|
||||
try:
|
||||
from pybitmessage.debug import logger, resetLogging
|
||||
# pylint: disable=import-outside-toplevel
|
||||
from pybitmessage.debug import logger, resetLogging, flush_logs
|
||||
if not os.path.isfile(log_file): # second pass
|
||||
pattern = r' <===> '
|
||||
pass_ = 2
|
||||
gen_log_config(pattern)
|
||||
resetLogging()
|
||||
except ImportError:
|
||||
|
@ -60,10 +68,17 @@ handlers=default
|
|||
finally:
|
||||
os.remove(log_config)
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
self.longMessage = True
|
||||
|
||||
logger_ = logging.getLogger('default')
|
||||
|
||||
self.assertEqual(logger, logger_)
|
||||
self.assertEqual(logger, logger_, ", pass {}".
|
||||
format(pass_))
|
||||
|
||||
logger_.info('Testing the logger...')
|
||||
flush_logs()
|
||||
|
||||
self.assertRegexpMatches(open(log_file).read(), pattern)
|
||||
# pylint: disable=deprecated-method
|
||||
self.assertRegexpMatches(open(log_file).read(), pattern,
|
||||
", pass {}".format(pass_))
|
||||
|
|
|
@ -5,12 +5,15 @@ Common reusable code for tests and tests for pybitmessage process.
|
|||
import os
|
||||
import signal
|
||||
import subprocess # nosec
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import unittest
|
||||
|
||||
import psutil
|
||||
|
||||
from pybitmessage.paths import set_appdata_folder
|
||||
|
||||
|
||||
def put_signal_file(path, filename):
|
||||
"""Creates file, presence of which is a signal about some event."""
|
||||
|
@ -23,7 +26,26 @@ class TestProcessProto(unittest.TestCase):
|
|||
it starts pybitmessage in setUpClass() and stops it in tearDownClass()
|
||||
"""
|
||||
_process_cmd = ['pybitmessage', '-d']
|
||||
_threads_count = 15
|
||||
_threads_count_min = 15
|
||||
_threads_count_max = 16
|
||||
_threads_names = [
|
||||
'PyBitmessage',
|
||||
'addressGenerato',
|
||||
'singleWorker',
|
||||
'SQL',
|
||||
'objectProcessor',
|
||||
'singleCleaner',
|
||||
'singleAPI',
|
||||
'Asyncore',
|
||||
'ReceiveQueue_0',
|
||||
'ReceiveQueue_1',
|
||||
'ReceiveQueue_2',
|
||||
'Announcer',
|
||||
'InvBroadcaster',
|
||||
'AddrBroadcaster',
|
||||
'Downloader',
|
||||
'Uploader'
|
||||
]
|
||||
_files = (
|
||||
'keys.dat', 'debug.log', 'messages.dat', 'knownnodes.dat',
|
||||
'.api_started', 'unittest.lock'
|
||||
|
@ -32,10 +54,28 @@ class TestProcessProto(unittest.TestCase):
|
|||
@classmethod
|
||||
def setUpClass(cls):
|
||||
"""Setup environment and start pybitmessage"""
|
||||
cls.home = os.environ['BITMESSAGE_HOME'] = tempfile.gettempdir()
|
||||
cls.home = set_appdata_folder(tempfile.gettempdir())
|
||||
cls._cleanup_files()
|
||||
put_signal_file(cls.home, 'unittest.lock')
|
||||
starttime = int(time.time())
|
||||
subprocess.call(cls._process_cmd) # nosec
|
||||
time.sleep(5)
|
||||
timeout = starttime + 30
|
||||
while time.time() <= timeout:
|
||||
try:
|
||||
if os.path.exists(os.path.join(cls.home,
|
||||
'singleton.lock')):
|
||||
pstat = os.stat(os.path.join(cls.home, 'singleton.lock'))
|
||||
if starttime <= pstat.st_mtime and pstat.st_size > 0:
|
||||
break
|
||||
except OSError:
|
||||
break
|
||||
time.sleep(1)
|
||||
if time.time() >= timeout:
|
||||
raise psutil.TimeoutExpired(
|
||||
"Timeout starting {}".format(cls._process_cmd))
|
||||
# wait a bit for the program to fully start
|
||||
# 10 sec should be enough
|
||||
time.sleep(10)
|
||||
cls.pid = int(cls._get_readline('singleton.lock'))
|
||||
cls.process = psutil.Process(cls.pid)
|
||||
|
||||
|
@ -56,6 +96,18 @@ class TestProcessProto(unittest.TestCase):
|
|||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def _kill_process(cls, timeout=5):
|
||||
try:
|
||||
cls.process.send_signal(signal.SIGKILL)
|
||||
cls.process.wait(timeout)
|
||||
# Windows or already dead
|
||||
except (AttributeError, psutil.NoSuchProcess):
|
||||
return True
|
||||
# except psutil.TimeoutExpired propagates, it means something is very
|
||||
# wrong
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def _cleanup_files(cls):
|
||||
for pfile in cls._files:
|
||||
|
@ -69,33 +121,57 @@ class TestProcessProto(unittest.TestCase):
|
|||
"""Ensures that pybitmessage stopped and removes files"""
|
||||
try:
|
||||
if not cls._stop_process():
|
||||
print(open(os.path.join(cls.home, 'debug.log'), 'rb').read())
|
||||
cls.process.kill()
|
||||
Why sleep 10 seconds if singleton.lock is already checked? What may happen?
OK, I see. It is to start all threads and not fail test_threads and test_shutdown.
But I hope that 5 sec will be enough for any environment (on travis it works without delay). This delay is used before starting tests in any test case subclassed from `TestProcessProto` on each pass.
|
||||
except psutil.NoSuchProcess:
|
||||
cls.process.wait(5)
|
||||
except (psutil.TimeoutExpired, psutil.NoSuchProcess):
|
||||
pass
|
||||
finally:
|
||||
cls._cleanup_files()
|
||||
|
||||
def _test_threads(self):
|
||||
# only count for now
|
||||
"""Test number and names of threads"""
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
self.longMessage = True
|
||||
|
||||
try:
|
||||
# using ps for posix platforms
|
||||
# because of https://github.com/giampaolo/psutil/issues/613
|
||||
# PyBitmessage
|
||||
# - addressGenerator
|
||||
# - singleWorker
|
||||
# - SQL
|
||||
# - objectProcessor
|
||||
# - singleCleaner
|
||||
# - singleAPI
|
||||
# - Asyncore
|
||||
# - ReceiveQueue_0
|
||||
# - ReceiveQueue_1
|
||||
# - ReceiveQueue_2
|
||||
# - Announcer
|
||||
# - InvBroadcaster
|
||||
# - AddrBroadcaster
|
||||
# - Downloader
|
||||
self.assertEqual(
|
||||
len(self.process.threads()), self._threads_count)
|
||||
thread_names = subprocess.check_output([
|
||||
"ps", "-L", "-o", "comm=", "--pid",
|
||||
str(self.process.pid)
|
||||
`AccessDenied` on osx: https://travis-ci.org/github/g1itch/PyBitmessage/jobs/738524694
Test is now skipped on OSX.
|
||||
]).split()
|
||||
Maybe this can be shortened now:
```patch
--- a/src/tests/test_process.py
+++ b/src/tests/test_process.py
@@ -120,28 +120,11 @@ class TestProcessProto(unittest.TestCase):
cls._cleanup_files()
def _test_threads(self):
- # only count for now
- # because of https://github.com/giampaolo/psutil/issues/613
- # PyBitmessage
- # - addressGenerator
- # - singleWorker
- # - SQL
- # - objectProcessor
- # - singleCleaner
- # - singleAPI
- # - Asyncore
- # - ReceiveQueue_0
- # - ReceiveQueue_1
- # - ReceiveQueue_2
- # - Announcer
- # - InvBroadcaster
- # - AddrBroadcaster
- # - Downloader
- # - Uploader
-
self.longMessage = True
try:
+ # using ps on Linux
+ # because of https://github.com/giampaolo/psutil/issues/613
```
Yea, makes sense.
|
||||
except: # pylint: disable=bare-except
|
||||
thread_names = []
|
||||
|
||||
running_threads = len(thread_names)
|
||||
if 0 < running_threads < 30: # adequacy check
|
||||
extra_threads = []
|
||||
missing_threads = []
|
||||
for thread_name in thread_names:
|
||||
if thread_name not in self._threads_names:
|
||||
extra_threads.append(thread_name)
|
||||
for thread_name in self._threads_names:
|
||||
if thread_name not in thread_names:
|
||||
missing_threads.append(thread_name)
|
||||
|
||||
msg = "Missing threads: {}, Extra threads: {}".format(
|
||||
",".join(missing_threads), ",".join(extra_threads))
|
||||
else:
|
||||
running_threads = self.process.num_threads()
|
||||
if sys.platform.startswith('win'):
|
||||
running_threads -= 1 # one extra thread on Windows!
|
||||
msg = "Unexpected running thread count"
|
||||
|
||||
self.assertGreaterEqual(
|
||||
running_threads,
|
||||
self._threads_count_min,
|
||||
msg)
|
||||
|
||||
self.assertLessEqual(
|
||||
running_threads,
|
||||
self._threads_count_max,
|
||||
AttributeError on windows: https://travis-ci.org/github/g1itch/PyBitmessage/jobs/738506283
ok
Added an exception handler.
|
||||
msg)
|
||||
|
||||
|
||||
class TestProcessShutdown(TestProcessProto):
|
||||
|
@ -110,6 +186,13 @@ class TestProcessShutdown(TestProcessProto):
|
|||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
"""Special teardown because pybitmessage is already stopped"""
|
||||
try:
|
||||
if cls.process.is_running():
|
||||
cls.process.kill()
|
||||
cls.process.wait(5)
|
||||
except (psutil.TimeoutExpired, psutil.NoSuchProcess):
|
||||
pass
|
||||
finally:
|
||||
cls._cleanup_files()
|
||||
|
||||
|
||||
|
|
This is like `tearDown()` without cleaning the files.

Yes, I think you're correct.