
Python cmodule.get_lib_extension Function Code Examples


This article collects typical usage examples of the theano.gof.cmodule.get_lib_extension function in Python. If you are wondering what get_lib_extension does, how to call it, or what real-world uses look like, the curated code examples below should help.



A total of 13 code examples of the get_lib_extension function are shown below, sorted by popularity by default. A minimal usage sketch comes first, followed by the examples themselves.
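Before the examples, a minimal usage sketch (not taken from any of the projects below, and assuming Theano is installed): get_lib_extension() returns the platform-dependent extension for compiled modules, and callers join it with a module name to build the path of the shared library to compile or load. The directory and module name here are hypothetical.

import os
from theano.gof.cmodule import get_lib_extension

# Hypothetical compile directory and module name, for illustration only.
location = "/tmp/theano_compiledir"
module_name = "cuda_ndarray"

# The recurring pattern in the examples below: build the shared-library
# filename from the module name plus the platform-dependent extension
# (typically 'so' on Linux/macOS, 'pyd' on Windows).
lib_filename = os.path.join(location, "%s.%s" % (module_name, get_lib_extension()))
print(lib_filename)  # e.g. /tmp/theano_compiledir/cuda_ndarray.so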

Example 1: compile_str


#.........part of the code omitted here.........
        # the standard ones.
        include_dirs = include_dirs + std_include_dirs()
        if os.path.abspath(os.path.split(__file__)[0]) not in include_dirs:
            include_dirs.append(os.path.abspath(os.path.split(__file__)[0]))

        libs = std_libs() + libs
        if "cudart" not in libs:
            libs.append("cudart")

        lib_dirs = std_lib_dirs() + lib_dirs
        if any(ld == os.path.join(cuda_root, "lib") or ld == os.path.join(cuda_root, "lib64") for ld in lib_dirs):
            warnings.warn(
                "You have the cuda library directory in your "
                "lib_dirs. This has been known to cause problems "
                "and should not be done."
            )

        if sys.platform != "darwin":
            # sometimes, the linker cannot find -lpython so we need to tell it
            # explicitly where it is located
            # this returns somepath/lib/python2.x
            python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
            python_lib = os.path.dirname(python_lib)
            if python_lib not in lib_dirs:
                lib_dirs.append(python_lib)

        cppfilename = os.path.join(location, "mod.cu")
        cppfile = open(cppfilename, "w")

        _logger.debug("Writing module C++ code to %s", cppfilename)

        cppfile.write(src_code)
        cppfile.close()
        lib_filename = os.path.join(location, "%s.%s" % (module_name, get_lib_extension()))

        _logger.debug("Generating shared lib %s", lib_filename)
        # TODO: Why do these args cause failure on gtx285 that has 1.3
        # compute capability? '--gpu-architecture=compute_13',
        # '--gpu-code=compute_13',
        # nvcc argument
        preargs1 = []
        for pa in preargs:
            for pattern in [
                "-O",
                "-arch=",
                "-ccbin=",
                "-G",
                "-g",
                "-I",
                "-L",
                "--fmad",
                "--ftz",
                "--maxrregcount",
                "--prec-div",
                "--prec-sqrt",
                "--use_fast_math",
                "-fmad",
                "-ftz",
                "-maxrregcount",
                "-prec-div",
                "-prec-sqrt",
                "-use_fast_math",
                "--use-local-env",
                "--cl-version=",
            ]:
Developer: naisanza, Project: Theano, Lines of code: 66, Source: nvcc_compiler.py
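Example 1 is cut off right at the start of its pattern loop. Judging from the complete version in Example 7 below, the loop typically continues as in this sketch: arguments that start with a prefix nvcc understands are kept in preargs1 for the nvcc command line, and everything else goes into preargs2 for the host compiler. The preargs list here is a made-up illustration.

# Sketch of how the truncated loop continues (compare Example 7 below).
preargs = ["-O3", "-DNDEBUG", "-arch=sm_30"]  # hypothetical compiler arguments
preargs1 = []
preargs2 = []
for pa in preargs:
    for pattern in ["-O", "-arch=", "-ccbin=", "-G", "-g", "-I", "-L",
                    "--fmad", "--ftz", "--maxrregcount", "--prec-div",
                    "--prec-sqrt", "--use_fast_math", "-fmad", "-ftz",
                    "-maxrregcount", "-prec-div", "-prec-sqrt",
                    "-use_fast_math", "--use-local-env", "--cl-version="]:
        if pa.startswith(pattern):
            # nvcc understands this argument: keep it for nvcc itself.
            preargs1.append(pa)
            break
    else:
        # Everything else is later forwarded to the host compiler.
        preargs2.append(pa)

print(preargs1)  # ['-O3', '-arch=sm_30']
print(preargs2)  # ['-DNDEBUG']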


Example 2: get_lib_extension

    A warning is displayed, so that the user is aware that cuda-based code is
    not going to work.
    Note that there is no point calling this function from outside of
    `cuda.__init__`, since it has no effect once the module is loaded.

    """
    global cuda_available, cuda_warning_is_displayed
    cuda_available = False

# cuda_ndarray compile and import
cuda_path = os.path.abspath(os.path.split(__file__)[0])

cuda_ndarray_loc = os.path.join(config.compiledir, 'cuda_ndarray')
cuda_ndarray_so = os.path.join(cuda_ndarray_loc,
                               'cuda_ndarray.' + get_lib_extension())
libcuda_ndarray_so = os.path.join(cuda_ndarray_loc,
                               'libcuda_ndarray.' + get_lib_extension())


def try_import():
    """
    Load the cuda_ndarray module if present and up to date.
    Return True if loaded correctly, otherwise return False.

    """
    cuda_files = (
        'cuda_ndarray.cu',
        'cuda_ndarray.cuh',
        'conv_full_kernel.cu',
        'cnmem.h',
Developer: Ungar7, Project: Theano, Lines of code: 30, Source: __init__.py


Example 3: compile_str

    def compile_str(
            module_name, src_code,
            location=None, include_dirs=[], lib_dirs=[], libs=[], preargs=[],
            rpaths=rpath_defaults, py_module=True):
        """:param module_name: string (this has been embedded in the src_code
        :param src_code: a complete c or c++ source listing for the module
        :param location: a pre-existing filesystem directory where the
                         cpp file and .so will be written
        :param include_dirs: a list of include directory names
                             (each gets prefixed with -I)
        :param lib_dirs: a list of library search path directory names
                         (each gets prefixed with -L)
        :param libs: a list of libraries to link with
                     (each gets prefixed with -l)
        :param preargs: a list of extra compiler arguments
        :param rpaths: list of rpaths to use with Xlinker.
                       Defaults to `rpath_defaults`.
        :param py_module: if False, compile to a shared library, but
            do not import as a Python module.

        :returns: dynamically-imported python module of the compiled code.
            (unless py_module is False, in that case returns None.)

        :note 1: On Windows 7 with nvcc 3.1 we need to compile in the
                 real directory Otherwise nvcc never finish.

        """

        rpaths = list(rpaths)

        if sys.platform == "win32":
            # Remove some compilation args that cl.exe does not understand.
            # cl.exe is the compiler used by nvcc on Windows.
            for a in ["-Wno-write-strings", "-Wno-unused-label",
                      "-Wno-unused-variable", "-fno-math-errno"]:
                if a in preargs:
                    preargs.remove(a)
        if preargs is None:
            preargs = []
        else:
            preargs = list(preargs)
        if sys.platform != 'win32':
            preargs.append('-fPIC')
        no_opt = False
        cuda_root = config.cuda.root

        # The include dirs given by the user should have precedence over
        # the standard ones.
        include_dirs = include_dirs + std_include_dirs()
        if os.path.abspath(os.path.split(__file__)[0]) not in include_dirs:
            include_dirs.append(os.path.abspath(os.path.split(__file__)[0]))

        libs = std_libs() + libs
        if 'cudart' not in libs:
            libs.append('cudart')

        lib_dirs = std_lib_dirs() + lib_dirs
        if cuda_root:
            lib_dirs.append(os.path.join(cuda_root, 'lib'))

            # from Benjamin Schrauwen April 14 2010
            if sys.platform != 'darwin':
                # No 64 bit CUDA libraries available on the mac, yet..
                lib_dirs.append(os.path.join(cuda_root, 'lib64'))

        if sys.platform == 'darwin':
            # On the mac, nvcc is not able to link using -framework
            # Python, so we have to manually add the correct library and
            # paths
            darwin_python_lib = commands.getoutput('python-config --ldflags')
        else:
            # sometimes, the linker cannot find -lpython so we need to tell it
            # explicitly where it is located
            # this returns somepath/lib/python2.x
            python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, \
                            standard_lib=1)
            python_lib = os.path.dirname(python_lib)
            if python_lib not in lib_dirs:
                lib_dirs.append(python_lib)

        cppfilename = os.path.join(location, 'mod.cu')
        cppfile = file(cppfilename, 'w')

        _logger.debug('Writing module C++ code to %s', cppfilename)
        ofiles = []
        rval = None

        cppfile.write(src_code)
        cppfile.close()
        lib_filename = os.path.join(location, '%s.%s' %
                (module_name, get_lib_extension()))

        _logger.debug('Generating shared lib %s', lib_filename)
        # TODO: Why do these args cause failure on gtx285 that has 1.3
        # compute capability? '--gpu-architecture=compute_13',
        # '--gpu-code=compute_13',
        #nvcc argument
        preargs1 = []
        for pa in preargs:
            for pattern in ['-O', '-arch=',
#.........part of the code omitted here.........
Developer: jmarinero, Project: Theano, Lines of code: 101, Source: nvcc_compiler.py


Example 4: get_lib_extension

    A warning is displayed, so that the user is aware that cuda-based code is
    not going to work.
    Note that there is no point calling this function from outside of
    `cuda.__init__`, since it has no effect once the module is loaded.

    """
    global cuda_available, cuda_warning_is_displayed
    cuda_available = False

# cuda_ndarray compile and import
cuda_path = os.path.abspath(os.path.split(__file__)[0])

cuda_ndarray_loc = os.path.join(config.compiledir, 'cuda_ndarray')
cuda_ndarray_so = os.path.join(
        cuda_ndarray_loc, 'cuda_ndarray.' + get_lib_extension())
libcuda_ndarray_so = os.path.join(
        cuda_ndarray_loc, 'libcuda_ndarray.' + get_lib_extension())


def try_import():
    """
    Load the cuda_ndarray module if present and up to date.
    Return True if loaded correctly, otherwise return False.

    """
    cuda_files = (
        'cuda_ndarray.cu',
        'cuda_ndarray.cuh',
        'conv_full_kernel.cu',
        'cnmem.h',
Developer: MarcCote, Project: Theano, Lines of code: 30, Source: __init__.py


Example 5: compile_str

    def compile_str(
            module_name, src_code,
            location=None, include_dirs=[], lib_dirs=[], libs=[], preargs=[],
            rpaths=rpath_defaults, py_module=True):
        """:param module_name: string (this has been embedded in the src_code
        :param src_code: a complete c or c++ source listing for the module
        :param location: a pre-existing filesystem directory where the
                         cpp file and .so will be written
        :param include_dirs: a list of include directory names
                             (each gets prefixed with -I)
        :param lib_dirs: a list of library search path directory names
                         (each gets prefixed with -L)
        :param libs: a list of libraries to link with
                     (each gets prefixed with -l)
        :param preargs: a list of extra compiler arguments
        :param rpaths: list of rpaths to use with Xlinker.
                       Defaults to `rpath_defaults`.
        :param py_module: if False, compile to a shared library, but
            do not import as a Python module.

        :returns: dynamically-imported python module of the compiled code.
            (unless py_module is False, in that case returns None.)

        :note 1: On Windows 7 with nvcc 3.1 we need to compile in the
                 real directory Otherwise nvcc never finish.

        """

        rpaths = list(rpaths)

        if sys.platform == "win32":
            # Remove some compilation args that cl.exe does not understand.
            # cl.exe is the compiler used by nvcc on Windows.
            for a in ["-Wno-write-strings", "-Wno-unused-label",
                      "-Wno-unused-variable", "-fno-math-errno"]:
                if a in preargs:
                    preargs.remove(a)
        if preargs is None:
            preargs = []
        else:
            preargs = list(preargs)
        if sys.platform != 'win32':
            preargs.append('-fPIC')
        no_opt = False
        cuda_root = config.cuda.root

        # The include dirs given by the user should have precedence over
        # the standard ones.
        include_dirs = include_dirs + std_include_dirs()
        if os.path.abspath(os.path.split(__file__)[0]) not in include_dirs:
            include_dirs.append(os.path.abspath(os.path.split(__file__)[0]))

        libs = std_libs() + libs
        if 'cudart' not in libs:
            libs.append('cudart')

        lib_dirs = std_lib_dirs() + lib_dirs
        if cuda_root:
            lib_dirs.append(os.path.join(cuda_root, 'lib'))

            # from Benjamin Schrauwen April 14 2010
            if sys.platform != 'darwin':
                # OS X uses universal libraries
                lib_dirs.append(os.path.join(cuda_root, 'lib64'))

        if sys.platform != 'darwin':
            # sometimes, the linker cannot find -lpython so we need to tell it
            # explicitly where it is located
            # this returns somepath/lib/python2.x
            python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, \
                            standard_lib=1)
            python_lib = os.path.dirname(python_lib)
            if python_lib not in lib_dirs:
                lib_dirs.append(python_lib)

        cppfilename = os.path.join(location, 'mod.cu')
        cppfile = open(cppfilename, 'w')

        _logger.debug('Writing module C++ code to %s', cppfilename)

        cppfile.write(src_code)
        cppfile.close()
        lib_filename = os.path.join(location, '%s.%s' %
                (module_name, get_lib_extension()))

        _logger.debug('Generating shared lib %s', lib_filename)
        # TODO: Why do these args cause failure on gtx285 that has 1.3
        # compute capability? '--gpu-architecture=compute_13',
        # '--gpu-code=compute_13',
        #nvcc argument
        preargs1 = []
        for pa in preargs:
            for pattern in ['-O', '-arch=', '-ccbin=',
                            '--fmad', '--ftz', '--maxrregcount',
                            '--prec-div', '--prec-sqrt',  '--use_fast_math',
                            '-fmad', '-ftz', '-maxrregcount',
                            '-prec-div', '-prec-sqrt', '-use_fast_math']:
                if pa.startswith(pattern):
                    preargs1.append(pa)
        preargs2 = [pa for pa in preargs
#.........part of the code omitted here.........
Developer: MLevinson-OR, Project: Theano, Lines of code: 101, Source: nvcc_compiler.py


Example 6: get_lib_extension

_logger_name = 'pylearn2.sandbox.cuda_convnet.convnet_compile'
_logger = logging.getLogger(_logger_name)
#_logger.addHandler(logging.StreamHandler())
#_logger.setLevel(logging.DEBUG)

_logger.debug('importing')


cuda_convnet_loc = os.path.join(config.compiledir, 'cuda_convnet')
# In partial dependency order: the last ones depend on the first ones
cuda_convnet_file_sources = ('nvmatrix_kernels.cu', 'nvmatrix.cu',
                             'conv_util.cu', 'filter_acts.cu', 'img_acts.cu',
                             'weight_acts.cu')
cuda_convnet_so = os.path.join(cuda_convnet_loc,
        'cuda_convnet.' + get_lib_extension())
libcuda_convnet_so = os.path.join(cuda_convnet_loc,
        'libcuda_convnet.' + get_lib_extension())


def convnet_available():
    # If already compiled, OK
    if convnet_available.compiled:
        _logger.debug('already compiled')
        return True

    # If there was an error, do not try again
    if convnet_available.compile_error:
        _logger.debug('error last time')
        return False
Developer: BloodD, Project: pylearn2, Lines of code: 29, Source: convnet_compile.py


Example 7: compile_str


#.........part of the code omitted here.........
                        not(ld == os.path.join(cuda_root, 'lib') or
                            ld == os.path.join(cuda_root, 'lib64'))]

        if sys.platform != 'darwin':
            # sometimes, the linker cannot find -lpython so we need to tell it
            # explicitly where it is located
            # this returns somepath/lib/python2.x
            python_lib = distutils.sysconfig.get_python_lib(plat_specific=1,
                                                            standard_lib=1)
            python_lib = os.path.dirname(python_lib)
            if python_lib not in lib_dirs:
                lib_dirs.append(python_lib)

        if (config.nvcc.cudafe == 'heuristic' and not
            any(marker in src_code for marker in ("__global__", "__device__",
                                                  "__host__", "<<<",
                                                  "nvmatrix.cuh"))):
            # only calls existing CUDA functions, can compile much faster
            cppfilename = os.path.join(location, 'mod.cpp')
            src_code = ("#include <cuda.h>\n"
                        "#include <cuda_runtime_api.h>\n" +
                        src_code)
        else:
            # contains CUDA host code or device functions, needs .cu extension
            cppfilename = os.path.join(location, 'mod.cu')

        with open(cppfilename, 'w') as cppfile:

            _logger.debug('Writing module C++ code to %s', cppfilename)
            cppfile.write(src_code)

        lib_filename = os.path.join(
            location, '%s.%s' %
            (module_name, get_lib_extension()))

        _logger.debug('Generating shared lib %s', lib_filename)
        # TODO: Why do these args cause failure on gtx285 that has 1.3
        # compute capability? '--gpu-architecture=compute_13',
        # '--gpu-code=compute_13',
        # nvcc argument
        preargs1 = []
        preargs2 = []
        for pa in preargs:
            if pa.startswith('-Wl,'):
                # the -rpath option is not understood by the Microsoft linker
                if sys.platform != 'win32' or not pa.startswith('-Wl,-rpath'):
                    preargs1.append('-Xlinker')
                    preargs1.append(pa[4:])
                continue
            for pattern in ['-O', '-arch=', '-ccbin=', '-G', '-g', '-I',
                            '-L', '--fmad', '--ftz', '--maxrregcount',
                            '--prec-div', '--prec-sqrt', '--use_fast_math',
                            '-fmad', '-ftz', '-maxrregcount',
                            '-prec-div', '-prec-sqrt', '-use_fast_math',
                            '--use-local-env', '--cl-version=', '-std=']:

                if pa.startswith(pattern):
                    preargs1.append(pa)
                    break
            else:
                preargs2.append(pa)

        # Don't put -G by default, as it slow things down.
        # We aren't sure if -g slow things down, so we don't put it by default.
        cmd = [nvcc_path, '-shared'] + preargs1
        if config.nvcc.compiler_bindir:
Developer: Faruk-Ahmed, Project: Theano, Lines of code: 67, Source: nvcc_compiler.py


Example 8: get_lib_extension

    # We must do those import to be able to create the full doc when nvcc
    # is not available
    import cuda_ndarray.cuda_ndarray as cuda
    from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler
    import cuda_ndarray

    # Python 3 does not necessarily set __file__. May need manual setting.
    # The problem is known to occur on Windows 10 with Python 3.4 installed by Anaconda.
    try:
        cuda_ndarray.__file__
    except AttributeError:
        from theano.gof.cmodule import get_lib_extension

        # Only works with Python 3, but it's fine, because Python 2
        # guarantees to set __file__ when importing any module.
        cuda_ndarray.__file__ = os.path.join(cuda_ndarray.__path__._path[0], "cuda_ndarray." + get_lib_extension())
except ImportError:
    # Used to know that `cuda` could not be properly imported.
    cuda = None


class CudaNdarrayType(Type):

    typenum = 11  # Until hardware improves, this class deals with floats.

    dtype = "float32"

    Variable = None
    """
    This will be set to the Variable type corresponding to this class.
Developer: kmike, Project: Theano, Lines of code: 30, Source: type.py


Example 9: set_cuda_disabled

    # We were asked to NEVER use the GPU
    set_cuda_disabled()

# If $TMPDIR is defined, nvopencc wants it to exist
if cuda_available and 'TMPDIR' in os.environ :
    tmpdir = os.environ['TMPDIR']
    if not os.path.exists(tmpdir) :
        os.makedirs(tmpdir)

# The path to source files for cuda_ndarray and cuda_devquery
cuda_path = os.path.abspath(os.path.split(__file__)[0])

# Early on detection and architecture selection via cuda_devquery
cuda_devquery_loc = os.path.join(config.compiledir, 'cuda_devquery')
cuda_devquery_so = os.path.join(cuda_devquery_loc,
                                'cuda_devquery.' + get_lib_extension())
cuda_devquery_cu = os.path.join(cuda_path, "cuda_devquery.cu")

def try_devquery():
    return check_module('cuda_devquery', cuda_devquery_so, [cuda_devquery_cu])

# Figure out if cuda_devquery needs compilation
if cuda_available and not try_devquery():
    get_lock()
    try:
        if not try_devquery():
            code = open(cuda_devquery_cu).read()
            if not os.path.exists(cuda_devquery_loc):
                os.makedirs(cuda_devquery_loc)
            try:
                nvcc_compiler.NVCC_compiler().compile_str(
Developer: ivannz, Project: Theano, Lines of code: 31, Source: __init__.py


Example 10: compile_str

    def compile_str(
        module_name,
        src_code,
        location=None,
        include_dirs=[],
        lib_dirs=[],
        libs=[],
        preargs=[],
        rpaths=rpath_defaults,
        py_module=True,
    ):
        """:param module_name: string (this has been embedded in the src_code
        :param src_code: a complete c or c++ source listing for the module
        :param location: a pre-existing filesystem directory where the
                         cpp file and .so will be written
        :param include_dirs: a list of include directory names
                             (each gets prefixed with -I)
        :param lib_dirs: a list of library search path directory names
                         (each gets prefixed with -L)
        :param libs: a list of libraries to link with
                     (each gets prefixed with -l)
        :param preargs: a list of extra compiler arguments
        :param rpaths: list of rpaths to use with Xlinker.
                       Defaults to `rpath_defaults`.
        :param py_module: if False, compile to a shared library, but
            do not import as a Python module.

        :returns: dynamically-imported python module of the compiled code.
            (unless py_module is False, in that case returns None.)

        :note 1: On Windows 7 with nvcc 3.1 we need to compile in the
                 real directory Otherwise nvcc never finish.

        """

        rpaths = list(rpaths)

        if sys.platform == "win32":
            # Remove some compilation args that cl.exe does not understand.
            # cl.exe is the compiler used by nvcc on Windows.
            for a in ["-Wno-write-strings", "-Wno-unused-label", "-Wno-unused-variable", "-fno-math-errno"]:
                if a in preargs:
                    preargs.remove(a)
        if preargs is None:
            preargs = []
        else:
            preargs = list(preargs)
        if sys.platform != "win32":
            preargs.append("-fPIC")
        cuda_root = config.cuda.root

        # The include dirs given by the user should have precedence over
        # the standard ones.
        include_dirs = include_dirs + std_include_dirs()
        if os.path.abspath(os.path.split(__file__)[0]) not in include_dirs:
            include_dirs.append(os.path.abspath(os.path.split(__file__)[0]))

        libs = std_libs() + libs
        if "cudart" not in libs:
            libs.append("cudart")

        lib_dirs = std_lib_dirs() + lib_dirs
        if any(ld == os.path.join(cuda_root, "lib") or ld == os.path.join(cuda_root, "lib64") for ld in lib_dirs):
            warnings.warn(
                "You have the cuda library directory in your "
                "lib_dirs. This has been known to cause problems "
                "and should not be done."
            )

        if sys.platform != "darwin":
            # sometimes, the linker cannot find -lpython so we need to tell it
            # explicitly where it is located
            # this returns somepath/lib/python2.x
            python_lib = distutils.sysconfig.get_python_lib(plat_specific=1, standard_lib=1)
            python_lib = os.path.dirname(python_lib)
            if python_lib not in lib_dirs:
                lib_dirs.append(python_lib)

        cppfilename = os.path.join(location, "mod.cu")
        cppfile = open(cppfilename, "w")

        _logger.debug("Writing module C++ code to %s", cppfilename)

        cppfile.write(src_code)
        cppfile.close()
        lib_filename = os.path.join(location, "%s.%s" % (module_name, get_lib_extension()))

        _logger.debug("Generating shared lib %s", lib_filename)
        # TODO: Why do these args cause failure on gtx285 that has 1.3
        # compute capability? '--gpu-architecture=compute_13',
        # '--gpu-code=compute_13',
        # nvcc argument
        preargs1 = []
        for pa in preargs:
            for pattern in [
                "-O",
                "-arch=",
                "-ccbin=",
                "-G",
                "-g",
#.........part of the code omitted here.........
Developer: dwf, Project: Theano, Lines of code: 101, Source: nvcc_compiler.py


Example 11: get_lib_extension

    """Function used to disable cuda.

    A warning is displayed, so that the user is aware that cuda-based code is
    not going to work.
    Note that there is no point calling this function from outside of
    `cuda.__init__`, since it has no effect once the module is loaded.
    """
    global cuda_available, cuda_warning_is_displayed
    cuda_available = False


# cuda_ndarray compile and import
cuda_path = os.path.abspath(os.path.split(__file__)[0])

cuda_ndarray_loc = os.path.join(config.compiledir, "cuda_ndarray")
cuda_ndarray_so = os.path.join(cuda_ndarray_loc, "cuda_ndarray." + get_lib_extension())
libcuda_ndarray_so = os.path.join(cuda_ndarray_loc, "libcuda_ndarray." + get_lib_extension())


def try_import():
    """
    load the cuda_ndarray module if present and up to date
    return True if loaded correctly, otherwise return False
    """
    cuda_files = ("cuda_ndarray.cu", "cuda_ndarray.cuh", "conv_full_kernel.cu", "conv_kernel.cu")
    stat_times = [os.stat(os.path.join(cuda_path, cuda_file))[stat.ST_MTIME] for cuda_file in cuda_files]
    date = max(stat_times)
    if os.path.exists(cuda_ndarray_so):
        if date >= os.stat(cuda_ndarray_so)[stat.ST_MTIME]:
            return False
    try:
Developer: orhanf, Project: configs, Lines of code: 31, Source: __init__.py
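Example 11's try_import compares the modification times of the CUDA source files against the compiled cuda_ndarray shared library and reports a stale build by returning False. The typical caller pattern around it, sketched below, is modeled on the get_lock/compile_str usage in Example 9; the exact arguments and the libs value are assumptions, not verbatim code from the excerpt.

import os
from theano.gof.compilelock import get_lock, release_lock
from theano.sandbox.cuda import nvcc_compiler

# try_import, cuda_path and cuda_ndarray_loc are the names defined in the
# excerpts above; only the compile-on-demand shape is shown here.
if not try_import():
    get_lock()
    try:
        if not try_import():  # re-check after acquiring the compile lock
            if not os.path.exists(cuda_ndarray_loc):
                os.makedirs(cuda_ndarray_loc)
            code = open(os.path.join(cuda_path, "cuda_ndarray.cu")).read()
            nvcc_compiler.NVCC_compiler().compile_str(
                "cuda_ndarray", code,
                location=cuda_ndarray_loc,
                include_dirs=[cuda_path],
                libs=["cublas"])  # assumed library list
    finally:
        release_lock()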


Example 12: get_lib_extension

root_dir = os.path.join(theano_ops.__path__[0], 'abll')
include_dir = os.path.join(root_dir, 'include')
src_dir = os.path.join(root_dir, 'src')
loc = os.path.join(config.compiledir, 'abll')
libs = [
    'cublas',
    'curand',
    'cufft',
]
srcs = (
    # In partial dependency order: the last ones depend on the first ones
    'abll/conv_bc01_fft.cu',
)

abll_so = os.path.join(loc, 'abll.' + get_lib_extension())
libabll_so = os.path.join(loc, 'libabll.' + get_lib_extension())


def is_available():
    # If already compiled, OK
    if is_available.compiled:
        _logger.debug('already compiled')
        return True

    # If there was an error, do not try again
    if is_available.compile_error:
        _logger.debug('error last time')
        return False

    # Else, we need CUDA
Developer: andersbll, Project: theano_ops, Lines of code: 30, Source: abll_compile.py


Example 13: get_lib_extension

_logger.debug("importing")


cuda_convnet_loc = os.path.join(config.compiledir, "cuda_convnet")
# In partial dependency order: the last ones depend on the first ones
cuda_convnet_file_sources = (
    "nvmatrix_kernels.cu",
    "nvmatrix.cu",
    "conv_util.cu",
    "filter_acts.cu",
    "img_acts.cu",
    "maxunpool.cu",
    "weight_acts.cu",
)
cuda_convnet_so = os.path.join(cuda_convnet_loc, "cuda_convnet." + get_lib_extension())
libcuda_convnet_so = os.path.join(cuda_convnet_loc, "libcuda_convnet." + get_lib_extension())


def convnet_available():
    check_cuda(check_enabled=False)

    # If already compiled, OK
    if convnet_available.compiled:
        _logger.debug("already compiled")
        return True

    # If there was an error, do not try again
    if convnet_available.compile_error:
        _logger.debug("error last time")
        return False
Developer: ifp-uiuc, Project: pylearn2, Lines of code: 30, Source: convnet_compile.py



Note: the theano.gof.cmodule.get_lib_extension examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution and use are subject to each project's license. Do not reproduce without permission.

