Don't load heavy modules until needed

parent 90ae95d9cb
commit c28a4f6174

setup.py: 2 lines changed
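The change applies the standard deferred-import idiom: keep a module-level placeholder and import the heavy dependency (numpy, pyopencl) only when a code path actually needs it, so merely importing the solver modules stays cheap. Below is a minimal, self-contained sketch of that idiom; `heavy`, `ensureHeavyModule` and the `json` stand-in are illustrative names, not code from the tree.

# Deferred-import sketch: the placeholder stays None until first use, so
# importing this module stays cheap.  `json` stands in for numpy/pyopencl.
heavy = None


def ensureHeavyModule():
    global heavy

    if heavy is not None:
        return heavy          # the import cost was already paid

    import json as heavyImport
    heavy = heavyImport

    return heavy


ensureHeavyModule()           # first call imports; later calls are free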
@@ -12,7 +12,7 @@ from src.version import softwareVersion
 EXTRAS_REQUIRE = {
     'gir': ['pygobject'],
     'notify2': ['notify2'],
-    'pyopencl': ['numpy', 'pyopencl'],
+    'pyopencl': ['pyopencl', 'numpy'],
     'prctl': ['python_prctl'], # Named threads
     'qrcode': ['qrcode'],
     'sound;platform_system=="Windows"': ['winsound'],
@@ -28,7 +28,7 @@ class Task(object):
         self.target = target

 class WorkProver(threading.Thread):
-    def __init__(self, codePath, GPUVendors, seed, statusUpdated):
+    def __init__(self, codePath, GPUVendor, seed, statusUpdated):
         super(self.__class__, self).__init__()

         self.availableSolvers = {
@@ -51,7 +51,7 @@ class WorkProver(threading.Thread):
             pass

         try:
-            self.availableSolvers["gpu"] = gpusolver.GPUSolver(codePath, GPUVendors)
+            self.availableSolvers["gpu"] = gpusolver.GPUSolver(codePath, GPUVendor)
         except gpusolver.GPUSolverError:
             pass

@@ -81,32 +81,31 @@ class WorkProver(threading.Thread):
         if self.statusUpdated is None:
             return

-        if self.solver is None:
-            parallelism = 0
-        else:
-            parallelism = self.solver.parallelism
+        status = None

-        self.statusUpdated((self.solverName, parallelism, self.speed))
+        if self.solver is not None:
+            status = self.solver.status

-    def setSolver(self, name, parallelism):
+        self.statusUpdated((self.solverName, status, self.speed))
+
+    def setSolver(self, name, configuration):
         if name is None and self.solverName is None:
             pass
         elif name == self.solverName:
-            if self.solver.parallelism != parallelism:
-                self.solver.setParallelism(parallelism)
+            self.solver.setConfiguration(configuration)
         else:
             if self.solver is not None:
-                self.solver.setParallelism(0)
+                self.solver.setConfiguration(None)
                 self.solverName = None
                 self.solver = None

             if name is not None:
                 if name not in self.availableSolvers:
-                    name, parallelism = "dumb", 1
+                    name, configuration = "dumb", None

                 self.solverName = name
                 self.solver = self.availableSolvers[name]
-                self.solver.setParallelism(parallelism)
+                self.solver.setConfiguration(configuration)

         self.notifyStatus()

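The hunk above changes the solver interface from an integer parallelism to an opaque configuration: setConfiguration(None) releases whatever the solver holds, and status reporting forwards the solver's own status attribute instead of a parallelism count. The following self-contained sketch illustrates that contract; StubSolver, report and show are illustrative names, not code from the tree.

class StubSolver(object):
    """Stand-in for a solver following the new contract."""

    def __init__(self):
        self.status = None

    def setConfiguration(self, configuration):
        # None releases resources; any other value is solver-specific.
        self.status = configuration


def report(solverName, solver, speed, statusUpdated):
    # Mirrors the reworked notifyStatus: forward solver.status as-is.
    status = None

    if solver is not None:
        status = solver.status

    statusUpdated((solverName, status, speed))


def show(update):
    pass  # in WorkProver this would be the statusUpdated callback


solver = StubSolver()
solver.setConfiguration(4)
report("stub", solver, 0, show)
solver.setConfiguration(None)     # tear down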
@@ -175,7 +174,7 @@ class WorkProver(threading.Thread):
             self.currentTaskID = self.tasks[self.currentTaskID].next

     def shutdown(self):
-        self.setSolver(None, 0)
+        self.setSolver(None, None)

         for i in self.tasks.keys():
             self.cancelTask(i)
@@ -24,7 +24,7 @@ class DumbSolver(object):
         self.firstHash = ctypes.create_string_buffer(64)
         self.secondHash = ctypes.create_string_buffer(64)

-        self.parallelism = 1
+        self.status = None

     def search(self, initialHash, target, seed, timeout):
         startTime = utils.getTimePoint()
@@ -66,5 +66,5 @@ class DumbSolver(object):
             if utils.getTimePoint() - startTime >= timeout:
                 return None, 256 * i

-    def setParallelism(self, parallelism):
+    def setConfiguration(self, configuration):
         pass
@@ -58,6 +58,7 @@ class FastSolver(object):
         self.iterationsCount = ctypes.c_ulonglong()

         self.parallelism = 0
+        self.status = 0

     def search(self, initialHash, target, seed, timeout):
         found = self.libfastsolver.fastsolver_search(
@@ -70,11 +71,16 @@ class FastSolver(object):
         else:
             return None, self.iterationsCount.value

-    def setParallelism(self, parallelism):
-        parallelism = min(4096, parallelism)
+    def setConfiguration(self, configuration):
+        if configuration is None:
+            parallelism = 0
+        else:
+            parallelism = min(4096, configuration)

         for i in xrange(self.parallelism, parallelism):
             self.parallelism = self.libfastsolver.fastsolver_add()

         if parallelism < self.parallelism:
             self.parallelism = self.libfastsolver.fastsolver_remove(self.parallelism - parallelism)
+
+        self.status = parallelism
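In the hunk above, a configuration of None now maps to zero worker threads, and any requested count is still clamped to 4096 before the native pool is grown or shrunk. A tiny self-contained sketch of that mapping (the helper name is illustrative):

def resolveParallelism(configuration, limit=4096):
    # None means "no workers"; integers are clamped to the hard limit.
    if configuration is None:
        return 0

    return min(limit, configuration)


assert resolveParallelism(None) == 0
assert resolveParallelism(8) == 8
assert resolveParallelism(100000) == 4096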
@@ -5,6 +5,11 @@ import struct
 import dumbsolver

 def setIdle():
+    if hasattr(os, "nice"):
+        os.nice(40)
+
+        return
+
     try:
         import psutil

@@ -23,14 +28,9 @@ def setIdle():
         handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, PID)

         win32process.SetPriorityClass(handle, win32process.IDLE_PRIORITY_CLASS)
-
-        return
     except:
         pass

-    if hasattr(os, "nice"):
-        os.nice(40)
-
 def threadFunction(local, remote, codePath, threadNumber):
     remote.close()
     setIdle()
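The reordering above makes os.nice(40) the first choice wherever it exists, so the third-party fallback (and its imports) is only reached on Windows. Here is a condensed, self-contained sketch of that priority-lowering logic; the psutil call shown as the fallback is a documented alternative, not the pywin32 code actually used in the tree.

import os


def setIdleSketch():
    # POSIX: os.nice() is built in and needs no extra imports.
    if hasattr(os, "nice"):
        os.nice(40)

        return

    # Windows (no os.nice): fall back to an optional helper, ignoring failures.
    try:
        import psutil

        psutil.Process().nice(psutil.IDLE_PRIORITY_CLASS)
    except Exception:
        pass

# A worker process would call setIdleSketch() right after it starts.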
@@ -60,7 +60,7 @@ class ForkingSolver(object):
         self.pipes = []
         self.processes = []

-        self.parallelism = 0
+        self.status = 0

         self.codePath = codePath

@@ -80,10 +80,13 @@ class ForkingSolver(object):
         return bestNonce, totalIterationsCount

-    def setParallelism(self, parallelism):
-        parallelism = min(4096, parallelism)
+    def setConfiguration(self, configuration):
+        if configuration is None:
+            parallelism = 0
+        else:
+            parallelism = min(4096, configuration)

-        for i in xrange(self.parallelism, parallelism):
+        for i in xrange(len(self.processes), parallelism):
             local, remote = multiprocessing.Pipe()

             process = multiprocessing.Process(target = threadFunction, args = (remote, local, self.codePath, i))
@@ -94,13 +97,13 @@ class ForkingSolver(object):
             self.pipes.append(local)
             self.processes.append(process)

-        for i in xrange(parallelism, self.parallelism):
+        for i in xrange(parallelism, len(self.processes)):
             pipe = self.pipes.pop()

             pipe.send(("shutdown", ))
             pipe.close()

-        for i in xrange(parallelism, self.parallelism):
+        for i in xrange(parallelism, len(self.processes)):
             self.processes.pop().join()

-        self.parallelism = parallelism
+        self.status = parallelism
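With the separate parallelism counter gone, the number of live workers is read straight from len(self.processes), so the resize loops cannot drift from the actual process list. A self-contained sketch of a pool resized that way follows; the class and function names are illustrative, not the tree's code.

import multiprocessing


def _worker(pipe):
    # Placeholder worker: wait for a shutdown message, then exit.
    pipe.recv()
    pipe.close()


class TinyPool(object):
    """Illustrative pool whose size is always len(self.processes)."""

    def __init__(self):
        self.pipes = []
        self.processes = []

    def resize(self, target):
        for i in range(len(self.processes), target):        # grow
            local, remote = multiprocessing.Pipe()
            process = multiprocessing.Process(target=_worker, args=(remote,))
            process.start()
            self.pipes.append(local)
            self.processes.append(process)

        while len(self.processes) > target:                  # shrink
            pipe = self.pipes.pop()
            pipe.send("shutdown")
            pipe.close()
            self.processes.pop().join()


if __name__ == "__main__":
    pool = TinyPool()
    pool.resize(2)
    pool.resize(0)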
@@ -11,53 +11,31 @@ class GPUSolverError(Exception):
     pass

 class GPUSolver(object):
-    def __init__(self, codePath, vendors = None):
-        global pyopencl, numpy
+    def __init__(self, codePath, vendor = None):
+        global pyopencl

         try:
-            import numpy
             import pyopencl
         except ImportError:
             raise GPUSolverError()

-        device = None
-
         for i in pyopencl.get_platforms():
-            if vendors is not None and i.vendor not in vendors:
+            if vendor is not None and i.vendor != vendor:
                 continue

             devices = i.get_devices(device_type = pyopencl.device_type.GPU)

             if len(devices) != 0:
-                device = devices[0]
+                self.device = devices[0]

                 break
         else:
             raise GPUSolverError()

-        context = pyopencl.Context(devices = [device])
-
-        computeUnitsCount = device.get_info(pyopencl.device_info.MAX_COMPUTE_UNITS)
-        workGroupSize = device.get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
-
-        self.parallelism = workGroupSize * computeUnitsCount
-        self.batchSize = self.parallelism * 256
-
-        self.queue = pyopencl.CommandQueue(context, device)
-
         with open(os.path.join(codePath, "gpusolver.cl")) as file:
-            source = file.read()
+            self.source = file.read()

-        program = pyopencl.Program(context, source).build()
-
-        self.hostOutput = numpy.zeros(1 + self.batchSize, numpy.uint32)
-        self.hostInput = numpy.zeros(1 + 8 + 1, numpy.uint64)
-
-        self.output = pyopencl.Buffer(context, pyopencl.mem_flags.READ_WRITE, 4 * (1 + self.batchSize))
-        self.input = pyopencl.Buffer(context, pyopencl.mem_flags.READ_ONLY, 8 * (1 + 8 + 1))
-
-        self.kernel = program.search
-        self.kernel.set_args(self.output, self.input)
+        self.status = None

     def search(self, initialHash, target, seed, timeout):
         startTime = utils.getTimePoint()
@@ -102,5 +80,32 @@ class GPUSolver(object):
             if utils.getTimePoint() - startTime >= timeout:
                 return None, self.batchSize * i

-    def setParallelism(self, parallelism):
-        pass
+    def setConfiguration(self, configuration):
+        global numpy
+
+        if numpy is not None:
+            return
+
+        import numpy
+
+        context = pyopencl.Context(devices = [self.device])
+
+        computeUnitsCount = self.device.get_info(pyopencl.device_info.MAX_COMPUTE_UNITS)
+        workGroupSize = self.device.get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
+
+        self.batchSize = workGroupSize * computeUnitsCount * 256
+
+        self.queue = pyopencl.CommandQueue(context, self.device)
+
+        program = pyopencl.Program(context, self.source).build()
+
+        self.hostOutput = numpy.zeros(1 + self.batchSize, numpy.uint32)
+        self.hostInput = numpy.zeros(1 + 8 + 1, numpy.uint64)
+
+        self.output = pyopencl.Buffer(context, pyopencl.mem_flags.READ_WRITE, 4 * (1 + self.batchSize))
+        self.input = pyopencl.Buffer(context, pyopencl.mem_flags.READ_ONLY, 8 * (1 + 8 + 1))
+
+        self.kernel = program.search
+        self.kernel.set_args(self.output, self.input)
+
+        self.status = self.batchSize
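The constructor is now cheap (it only probes for a device and reads the kernel source), while the numpy import, OpenCL context, program build and buffers are created on the first setConfiguration call and cached behind the module-level numpy sentinel; as in the hunk above, the configuration value itself is not inspected. A self-contained sketch of that "cheap constructor, expensive first configure" shape, needing no GPU or third-party packages, with illustrative names:

# The expensive step here just builds an array, standing in for the deferred
# numpy import, OpenCL context creation and kernel build in the hunk above.

expensiveModule = None            # module-level sentinel, like `numpy` above


class LazySolverSketch(object):
    def __init__(self):
        # Constructor only records what the deferred setup will need later.
        self.source = "placeholder kernel source"
        self.status = None

    def setConfiguration(self, configuration):
        global expensiveModule

        if expensiveModule is not None:
            return                # already initialized, nothing to do

        import array as expensive     # stand-in for the deferred heavy import
        expensiveModule = expensive

        self.table = expensiveModule.array("I", range(1024))
        self.status = len(self.table)


solver = LazySolverSketch()       # cheap: no heavy work yet
solver.setConfiguration(None)     # first call pays the setup cost
solver.setConfiguration(None)     # later calls return immediately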
@@ -85,7 +85,7 @@ class TestSolver(unittest.TestCase):
         except gpusolver.GPUSolverError:
             self.skipTest("OpenCL unavailable")

-        self.solver.setParallelism(1)
+        self.solver.setConfiguration(self.configuration)

     def testSearch(self):
         nonce = None
@@ -101,19 +101,23 @@
         self.assertTrue(utils.checkProof(nonce, initialHash, target))

     def tearDown(self):
-        self.solver.setParallelism(0)
+        self.solver.setConfiguration(None)

 class TestDumbSolver(TestSolver):
     Solver = dumbsolver.DumbSolver
+    configuration = None

 class TestForkingSolver(TestSolver):
     Solver = forkingsolver.ForkingSolver
+    configuration = 1

 class TestFastSolver(TestSolver):
     Solver = fastsolver.FastSolver
+    configuration = 1

 class TestGPUSolver(TestSolver):
     Solver = gpusolver.GPUSolver
+    configuration = None

 class TestWorkProver(unittest.TestCase):
     def setUp(self):