diff --git a/elephant/test/test_asset.py b/elephant/test/test_asset.py
index e9309e4c2..fad6d7523 100644
--- a/elephant/test/test_asset.py
+++ b/elephant/test/test_asset.py
@@ -22,6 +22,7 @@
 from elephant import statistics, kernels
 from elephant.spike_train_generation import homogeneous_poisson_process
+from elephant.utils import get_cuda_capability_major, get_opencl_capability
 
 try:
     import sklearn
@@ -33,17 +34,8 @@ HAVE_SKLEARN = True
 
 stretchedmetric2d = asset._stretched_metric_2d
 
-try:
-    import pyopencl
-    HAVE_PYOPENCL = asset.get_opencl_capability()
-except ImportError:
-    HAVE_PYOPENCL = False
-
-try:
-    import pycuda
-    HAVE_CUDA = asset.get_cuda_capability_major() > 0
-except ImportError:
-    HAVE_CUDA = False
+HAVE_PYOPENCL = get_opencl_capability()
+HAVE_CUDA = get_cuda_capability_major() != 0
 
 
 class AssetBinningTestCase(unittest.TestCase):
@@ -513,6 +505,7 @@ def test_intersection_matrix(self):
         # regression test Issue #481
         # see: https://github.com/NeuralEnsemble/elephant/issues/481
 
+    @unittest.skipIf(HAVE_CUDA, "CUDA available, will be used instead of CPU")
     def test_asset_choose_backend_opencl(self):
         class TestClassBackend(asset._GPUBackend):
diff --git a/elephant/utils.py b/elephant/utils.py
index b4ddfee22..dfdd575c9 100644
--- a/elephant/utils.py
+++ b/elephant/utils.py
@@ -337,40 +337,30 @@ def round_binning_errors(values, tolerance=1e-8):
 
 def get_cuda_capability_major():
     """
-    Extracts CUDA capability major version of the first available Nvidia GPU
-    card, if detected. Otherwise, return 0.
+    If PyCUDA is available, extracts CUDA capability major version of the
+    first available Nvidia GPU card, if detected. Otherwise, returns 0.
 
     Returns
     -------
     int
         CUDA capability major version.
     """
-    cuda_success = 0
-    for libname in ("libcuda.so", "libcuda.dylib", "cuda.dll"):
-        try:
-            cuda = ctypes.CDLL(libname)
-        except OSError:
-            continue
-        else:
-            break
-    else:
-        # not found
-        return 0
-    result = cuda.cuInit(0)
-    if result != cuda_success:
+    try:
+        import pycuda.driver as cuda
+    except ImportError:
         return 0
-    device = ctypes.c_int()
-    # parse the first GPU card only
-    result = cuda.cuDeviceGet(ctypes.byref(device), 0)
-    if result != cuda_success:
+
+    try:
+        import pycuda.autoinit
+    except (cuda.RuntimeError, AttributeError):
         return 0
-    cc_major = ctypes.c_int()
-    cc_minor = ctypes.c_int()
-    cuda.cuDeviceComputeCapability(
-        ctypes.byref(cc_major), ctypes.byref(cc_minor), device
-    )
-    return cc_major.value
+    try:
+        device = cuda.Device(0)
+        major, _ = device.compute_capability()
+        return major
+    except cuda.Error:
+        return 0
 
 
 def get_opencl_capability():
     """
@@ -380,7 +370,7 @@ def get_opencl_capability():
     Returns
     -------
     bool
-        True: if openCL platform detected and at least one device is found,
+        True: if OpenCL platform detected and at least one device is found,
         False: if OpenCL is not found or if no OpenCL devices are found
     """
     try:
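
Reviewer note (not part of the patch): with the detection logic centralized in `elephant.utils`, GPU-dependent tests can be gated by plain module-level flags instead of per-module try/except import guards. A minimal sketch of that pattern, assuming PyCUDA/PyOpenCL may or may not be installed; the `MyGpuBackendTest` class and its test names are hypothetical, used only to illustrate the usage:

```python
import unittest

from elephant.utils import get_cuda_capability_major, get_opencl_capability

# Module-level flags, mirroring the pattern used in test_asset.py above.
# Both helpers degrade gracefully (0 / False) when PyCUDA or PyOpenCL, the
# driver, or a device is missing, so no ImportError handling is needed here.
HAVE_PYOPENCL = get_opencl_capability()
HAVE_CUDA = get_cuda_capability_major() != 0


class MyGpuBackendTest(unittest.TestCase):  # hypothetical test case
    @unittest.skipUnless(HAVE_CUDA, "requires PyCUDA and an NVIDIA GPU")
    def test_cuda_backend(self):
        ...  # exercise the CUDA code path here

    # Mirrors the skipIf added in the patch
    # ("CUDA available, will be used instead of CPU").
    @unittest.skipIf(HAVE_CUDA, "CUDA available, will be used instead of CPU")
    @unittest.skipUnless(HAVE_PYOPENCL, "requires PyOpenCL and an OpenCL device")
    def test_opencl_backend(self):
        ...  # exercise the OpenCL code path here
```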
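
A quick way to sanity-check the PyCUDA-based rewrite on a machine that does have pycuda and an NVIDIA driver installed, using the same `Device.compute_capability()` call as the new implementation; on machines without them the helper simply returns 0:

```python
import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401 -- creates a context on the first GPU

from elephant.utils import get_cuda_capability_major

# compute_capability() returns a (major, minor) tuple, e.g. (7, 5).
major, minor = cuda.Device(0).compute_capability()
print(f"GPU 0 compute capability: {major}.{minor}")

# The refactored helper should report the same major version.
assert get_cuda_capability_major() == major
```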