Don't load heavy modules until needed
parent 90ae95d9cb
commit c28a4f6174
setup.py
@@ -12,7 +12,7 @@ from src.version import softwareVersion
 EXTRAS_REQUIRE = {
     'gir': ['pygobject'],
     'notify2': ['notify2'],
-    'pyopencl': ['numpy', 'pyopencl'],
+    'pyopencl': ['pyopencl', 'numpy'],
     'prctl': ['python_prctl'], # Named threads
     'qrcode': ['qrcode'],
     'sound;platform_system=="Windows"': ['winsound'],
@@ -28,7 +28,7 @@ class Task(object):
         self.target = target
 
 class WorkProver(threading.Thread):
-    def __init__(self, codePath, GPUVendors, seed, statusUpdated):
+    def __init__(self, codePath, GPUVendor, seed, statusUpdated):
         super(self.__class__, self).__init__()
 
         self.availableSolvers = {
@@ -51,7 +51,7 @@ class WorkProver(threading.Thread):
             pass
 
         try:
-            self.availableSolvers["gpu"] = gpusolver.GPUSolver(codePath, GPUVendors)
+            self.availableSolvers["gpu"] = gpusolver.GPUSolver(codePath, GPUVendor)
         except gpusolver.GPUSolverError:
             pass
 
@@ -81,32 +81,31 @@ class WorkProver(threading.Thread):
         if self.statusUpdated is None:
             return
 
-        if self.solver is None:
-            parallelism = 0
-        else:
-            parallelism = self.solver.parallelism
+        status = None
 
-        self.statusUpdated((self.solverName, parallelism, self.speed))
+        if self.solver is not None:
+            status = self.solver.status
 
-    def setSolver(self, name, parallelism):
+        self.statusUpdated((self.solverName, status, self.speed))
+
+    def setSolver(self, name, configuration):
         if name is None and self.solverName is None:
             pass
         elif name == self.solverName:
-            if self.solver.parallelism != parallelism:
-                self.solver.setParallelism(parallelism)
+            self.solver.setConfiguration(configuration)
         else:
             if self.solver is not None:
-                self.solver.setParallelism(0)
+                self.solver.setConfiguration(None)
                 self.solverName = None
                 self.solver = None
 
             if name is not None:
                 if name not in self.availableSolvers:
-                    name, parallelism = "dumb", 1
+                    name, configuration = "dumb", None
 
                 self.solverName = name
                 self.solver = self.availableSolvers[name]
-                self.solver.setParallelism(parallelism)
+                self.solver.setConfiguration(configuration)
 
         self.notifyStatus()
 
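The hunk above swaps the numeric parallelism argument for an opaque configuration value, with None meaning "release whatever the solver holds", and reports the solver's status instead of a thread count. A minimal sketch of that contract, using a hypothetical StubSolver (the names below are illustrative, not part of this patch):

class StubSolver(object):
    # Hypothetical solver obeying the new contract: setConfiguration(None)
    # tears state down, any other value (re)builds it; self.status carries
    # whatever the backend considers meaningful instead of a thread count.
    def __init__(self):
        self.status = None

    def setConfiguration(self, configuration):
        self.status = configuration

solver = StubSolver()
solver.setConfiguration(4)       # backend-specific setup value
solver.setConfiguration(None)    # release resources, as shutdown() now does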
@@ -175,7 +174,7 @@ class WorkProver(threading.Thread):
             self.currentTaskID = self.tasks[self.currentTaskID].next
 
     def shutdown(self):
-        self.setSolver(None, 0)
+        self.setSolver(None, None)
 
         for i in self.tasks.keys():
             self.cancelTask(i)
@@ -24,7 +24,7 @@ class DumbSolver(object):
         self.firstHash = ctypes.create_string_buffer(64)
         self.secondHash = ctypes.create_string_buffer(64)
 
-        self.parallelism = 1
+        self.status = None
 
     def search(self, initialHash, target, seed, timeout):
         startTime = utils.getTimePoint()
@@ -66,5 +66,5 @@ class DumbSolver(object):
             if utils.getTimePoint() - startTime >= timeout:
                 return None, 256 * i
 
-    def setParallelism(self, parallelism):
+    def setConfiguration(self, configuration):
         pass
@@ -58,6 +58,7 @@ class FastSolver(object):
         self.iterationsCount = ctypes.c_ulonglong()
 
         self.parallelism = 0
+        self.status = 0
 
     def search(self, initialHash, target, seed, timeout):
         found = self.libfastsolver.fastsolver_search(
@@ -70,11 +71,16 @@ class FastSolver(object):
         else:
             return None, self.iterationsCount.value
 
-    def setParallelism(self, parallelism):
-        parallelism = min(4096, parallelism)
+    def setConfiguration(self, configuration):
+        if configuration is None:
+            parallelism = 0
+        else:
+            parallelism = min(4096, configuration)
 
         for i in xrange(self.parallelism, parallelism):
             self.parallelism = self.libfastsolver.fastsolver_add()
 
         if parallelism < self.parallelism:
             self.parallelism = self.libfastsolver.fastsolver_remove(self.parallelism - parallelism)
+
+        self.status = parallelism
@@ -5,6 +5,11 @@ import struct
 import dumbsolver
 
 def setIdle():
+    if hasattr(os, "nice"):
+        os.nice(40)
+
+        return
+
     try:
         import psutil
 
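The early return added above is the point of this commit for this file: on platforms that have os.nice() the function finishes before it ever reaches the heavier psutil/win32 imports further down. A stripped-down sketch of that ordering (the psutil branch is illustrative and guarded, since psutil may be missing):

import os

def setIdle():
    # Cheap path first: lower our own priority and return before importing
    # anything heavy.
    if hasattr(os, "nice"):
        os.nice(40)

        return

    # Only platforms without os.nice() pay for the import below.
    try:
        import psutil

        psutil.Process().nice(psutil.IDLE_PRIORITY_CLASS)
    except Exception:
        pass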
@@ -23,14 +28,9 @@ def setIdle():
         handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, PID)
 
         win32process.SetPriorityClass(handle, win32process.IDLE_PRIORITY_CLASS)
-
-        return
     except:
         pass
 
-    if hasattr(os, "nice"):
-        os.nice(40)
-
 def threadFunction(local, remote, codePath, threadNumber):
     remote.close()
     setIdle()
@@ -60,7 +60,7 @@ class ForkingSolver(object):
         self.pipes = []
         self.processes = []
 
-        self.parallelism = 0
+        self.status = 0
 
         self.codePath = codePath
 
@@ -80,10 +80,13 @@ class ForkingSolver(object):
 
         return bestNonce, totalIterationsCount
 
-    def setParallelism(self, parallelism):
-        parallelism = min(4096, parallelism)
+    def setConfiguration(self, configuration):
+        if configuration is None:
+            parallelism = 0
+        else:
+            parallelism = min(4096, configuration)
 
-        for i in xrange(self.parallelism, parallelism):
+        for i in xrange(len(self.processes), parallelism):
             local, remote = multiprocessing.Pipe()
 
             process = multiprocessing.Process(target = threadFunction, args = (remote, local, self.codePath, i))
 
@@ -94,13 +97,13 @@ class ForkingSolver(object):
             self.pipes.append(local)
             self.processes.append(process)
 
-        for i in xrange(parallelism, self.parallelism):
+        for i in xrange(parallelism, len(self.processes)):
             pipe = self.pipes.pop()
 
             pipe.send(("shutdown", ))
             pipe.close()
 
-        for i in xrange(parallelism, self.parallelism):
+        for i in xrange(parallelism, len(self.processes)):
             self.processes.pop().join()
 
-        self.parallelism = parallelism
+        self.status = parallelism
 
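Both loops above are now driven by len(self.processes) instead of a separately tracked parallelism counter, so the pool size and the bookkeeping cannot drift apart. A self-contained sketch of that grow/shrink pattern (worker and resizePool are hypothetical stand-ins for threadFunction and setConfiguration):

import multiprocessing

def worker(pipe):
    # Hypothetical worker: loop until the parent sends a "shutdown" message.
    while True:
        if pipe.recv()[0] == "shutdown":
            pipe.close()

            return

def resizePool(pipes, processes, parallelism):
    # Grow: start new workers until len(processes) reaches the target.
    for i in range(len(processes), parallelism):
        local, remote = multiprocessing.Pipe()
        process = multiprocessing.Process(target = worker, args = (remote, ))
        process.start()

        pipes.append(local)
        processes.append(process)

    # Shrink: tell the surplus workers to stop, then join them (LIFO, so the
    # popped pipe always belongs to the popped process).
    for i in range(parallelism, len(processes)):
        pipe = pipes.pop()
        pipe.send(("shutdown", ))
        pipe.close()

        processes.pop().join()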
@@ -11,53 +11,31 @@ class GPUSolverError(Exception):
     pass
 
 class GPUSolver(object):
-    def __init__(self, codePath, vendors = None):
-        global pyopencl, numpy
+    def __init__(self, codePath, vendor = None):
+        global pyopencl
 
         try:
-            import numpy
             import pyopencl
         except ImportError:
             raise GPUSolverError()
 
-        device = None
-
         for i in pyopencl.get_platforms():
-            if vendors is not None and i.vendor not in vendors:
+            if vendor is not None and i.vendor != vendor:
                 continue
 
             devices = i.get_devices(device_type = pyopencl.device_type.GPU)
 
             if len(devices) != 0:
-                device = devices[0]
+                self.device = devices[0]
 
                 break
         else:
             raise GPUSolverError()
 
-        context = pyopencl.Context(devices = [device])
-
-        computeUnitsCount = device.get_info(pyopencl.device_info.MAX_COMPUTE_UNITS)
-        workGroupSize = device.get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
-
-        self.parallelism = workGroupSize * computeUnitsCount
-        self.batchSize = self.parallelism * 256
-
-        self.queue = pyopencl.CommandQueue(context, device)
-
         with open(os.path.join(codePath, "gpusolver.cl")) as file:
-            source = file.read()
+            self.source = file.read()
 
-        program = pyopencl.Program(context, source).build()
-
-        self.hostOutput = numpy.zeros(1 + self.batchSize, numpy.uint32)
-        self.hostInput = numpy.zeros(1 + 8 + 1, numpy.uint64)
-
-        self.output = pyopencl.Buffer(context, pyopencl.mem_flags.READ_WRITE, 4 * (1 + self.batchSize))
-        self.input = pyopencl.Buffer(context, pyopencl.mem_flags.READ_ONLY, 8 * (1 + 8 + 1))
-
-        self.kernel = program.search
-        self.kernel.set_args(self.output, self.input)
+        self.status = None
 
     def search(self, initialHash, target, seed, timeout):
         startTime = utils.getTimePoint()
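After this change __init__ only picks a device and keeps the kernel source; every expensive OpenCL and numpy object now waits for setConfiguration(). The device scan leans on Python's for ... else, where the else branch runs only if the loop ended without a break. A tiny illustration of that idiom with made-up platform data (pickDevice and the tuples are hypothetical, not the pyopencl API):

class GPUSolverError(Exception):
    pass

def pickDevice(platforms, vendor = None):
    # Same shape as the loop above: skip foreign vendors, take the first
    # device of the first usable platform, fail if nothing was found.
    for platformVendor, devices in platforms:
        if vendor is not None and platformVendor != vendor:
            continue

        if len(devices) != 0:
            device = devices[0]

            break
    else:
        raise GPUSolverError()

    return device

print(pickDevice([("Acme", []), ("NVIDIA Corporation", ["gpu0"])]))  # gpu0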
@@ -102,5 +80,32 @@ class GPUSolver(object):
             if utils.getTimePoint() - startTime >= timeout:
                 return None, self.batchSize * i
 
-    def setParallelism(self, parallelism):
-        pass
+    def setConfiguration(self, configuration):
+        global numpy
+
+        if numpy is not None:
+            return
+
+        import numpy
+
+        context = pyopencl.Context(devices = [self.device])
+
+        computeUnitsCount = self.device.get_info(pyopencl.device_info.MAX_COMPUTE_UNITS)
+        workGroupSize = self.device.get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
+
+        self.batchSize = workGroupSize * computeUnitsCount * 256
+
+        self.queue = pyopencl.CommandQueue(context, self.device)
+
+        program = pyopencl.Program(context, self.source).build()
+
+        self.hostOutput = numpy.zeros(1 + self.batchSize, numpy.uint32)
+        self.hostInput = numpy.zeros(1 + 8 + 1, numpy.uint64)
+
+        self.output = pyopencl.Buffer(context, pyopencl.mem_flags.READ_WRITE, 4 * (1 + self.batchSize))
+        self.input = pyopencl.Buffer(context, pyopencl.mem_flags.READ_ONLY, 8 * (1 + 8 + 1))
+
+        self.kernel = program.search
+        self.kernel.set_args(self.output, self.input)
+
+        self.status = self.batchSize
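setConfiguration above uses a module-level name as a "have we paid the import cost yet" flag: numpy presumably starts out as None at module scope and is rebound by the function-local import on the first call. A minimal sketch of that guard, with json standing in for the heavy module so the snippet runs anywhere:

heavyModule = None

def ensureLoaded():
    # First call imports and rebinds the global; later calls return at once,
    # mirroring the `global numpy` / `if numpy is not None: return` pattern.
    global heavyModule

    if heavyModule is not None:
        return heavyModule

    import json as heavyModule    # stand-in for numpy / pyopencl

    return heavyModule

print(ensureLoaded().dumps({"loaded": True}))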
@@ -85,7 +85,7 @@ class TestSolver(unittest.TestCase):
         except gpusolver.GPUSolverError:
             self.skipTest("OpenCL unavailable")
 
-        self.solver.setParallelism(1)
+        self.solver.setConfiguration(self.configuration)
 
     def testSearch(self):
         nonce = None
@@ -101,19 +101,23 @@ class TestSolver(unittest.TestCase):
         self.assertTrue(utils.checkProof(nonce, initialHash, target))
 
     def tearDown(self):
-        self.solver.setParallelism(0)
+        self.solver.setConfiguration(None)
 
 class TestDumbSolver(TestSolver):
     Solver = dumbsolver.DumbSolver
+    configuration = None
 
 class TestForkingSolver(TestSolver):
     Solver = forkingsolver.ForkingSolver
+    configuration = 1
 
 class TestFastSolver(TestSolver):
     Solver = fastsolver.FastSolver
+    configuration = 1
 
 class TestGPUSolver(TestSolver):
     Solver = gpusolver.GPUSolver
+    configuration = None
 
 class TestWorkProver(unittest.TestCase):
     def setUp(self):
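The tests now carry the backend-specific argument as a class attribute, so the shared setUp/tearDown can stay generic across all solver subclasses. A minimal unittest sketch of that layout (DummySolver and TestParallelSolver are stand-ins, not the real classes):

import unittest

class DummySolver(object):
    def setConfiguration(self, configuration):
        self.configuration = configuration

class TestSolver(unittest.TestCase):
    Solver = DummySolver
    configuration = None              # each subclass supplies its own value

    def setUp(self):
        self.solver = self.Solver()
        self.solver.setConfiguration(self.configuration)

    def tearDown(self):
        self.solver.setConfiguration(None)

    def testConfiguration(self):
        self.assertEqual(self.solver.configuration, self.configuration)

class TestParallelSolver(TestSolver):
    configuration = 1                 # e.g. one worker, as TestForkingSolver uses

if __name__ == "__main__":
    unittest.main()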