This article collects typical usage examples of the Python function pytools.any. If you are wondering what pytools.any does, how to call it, or what real-world uses look like, the curated code samples below should help.
A total of 19 code examples of the any function are shown, sorted by popularity by default. Upvote the ones you find useful; that feedback helps surface better Python examples.
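For orientation before the examples: pytools.any behaves like Python's builtin any (pytools shipped its own copy for the benefit of very old Python versions that predate the builtin, so the two are interchangeable in the snippets below). A minimal sketch of the calling pattern the examples share, written against the builtin; the data is made up for illustration:

import numpy as np

dtypes = [np.dtype(np.int32), np.dtype(np.float32)]

# True because at least one dtype is a floating-point kind.
has_float = any(dt.kind == "f" for dt in dtypes)
print(has_float)  # True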
Example 1: add_watches

def add_watches(self, watches):
    """Add quantities that are printed after every time step."""
    from pytools import Record

    class WatchInfo(Record):
        pass

    for watch in watches:
        if isinstance(watch, tuple):
            display, expr = watch
        else:
            display = watch
            expr = watch

        parsed = self._parse_expr(expr)
        parsed, dep_data = self._get_expr_dep_data(parsed)

        from pytools import any
        self.have_nonlocal_watches = self.have_nonlocal_watches or \
                any(dd.nonlocal_agg for dd in dep_data)

        from pymbolic import compile
        compiled = compile(parsed, [dd.varname for dd in dep_data])

        watch_info = WatchInfo(display=display, parsed=parsed,
                dep_data=dep_data, compiled=compiled)

        self.watches.append(watch_info)

Author: inducer | Project: pytools | Lines: 29 | Source: log.py
Example 2: get_binary_minmax_kernel

def get_binary_minmax_kernel(func, dtype_x, dtype_y, dtype_z):
    if np.float64 not in [dtype_x, dtype_y]:
        func = func + "f"

    from pytools import any
    if any(dt.kind == "f" for dt in [dtype_x, dtype_y, dtype_z]):
        func = "f" + func

    return get_binary_func_kernel(func, dtype_x, dtype_y, dtype_z)

Author: aaahexing | Project: pycuda | Lines: 9 | Source: elementwise.py
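The dtype-kind test above relies on numpy's one-character type kinds ("f" for floating point, "i" for signed integers, and so on). A standalone sketch of just that name-selection logic, using a hypothetical helper pick_minmax_func and the base name "min"; the surrounding get_binary_func_kernel machinery is PyCUDA-internal and not reproduced here:

import numpy as np

def pick_minmax_func(func, dtype_x, dtype_y, dtype_z):
    # Single-precision variants of the CUDA math functions carry an "f"
    # suffix (e.g. fminf instead of fmin)...
    if np.float64 not in (dtype_x, dtype_y):
        func = func + "f"
    # ...and floating-point comparisons use the fmin/fmax family rather
    # than the integer min/max.
    if any(np.dtype(dt).kind == "f" for dt in (dtype_x, dtype_y, dtype_z)):
        func = "f" + func
    return func

print(pick_minmax_func("min", np.float32, np.float32, np.float32))  # fminf
print(pick_minmax_func("min", np.float64, np.float32, np.float64))  # fmin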
Example 3: is_affine

def is_affine(self):
    from pytools import any

    has_high_order_geometry = any(
            sum(mid) >= 2 and abs(mc) >= 1e-13
            for mc_along_axis in self.modal_coeff.T
            for mid, mc in zip(self.ldis.generate_mode_identifiers(),
                mc_along_axis))

    return not has_high_order_geometry

Author: binho58 | Project: meshpy | Lines: 10 | Source: test_gmsh_reader_hedge.py
Example 4: get_binary_minmax_kernel

def get_binary_minmax_kernel(func, dtype_x, dtype_y, dtype_z, use_scalar):
    if np.float64 not in [dtype_x, dtype_y]:
        func = func + "f"

    from pytools import any
    if any(dt.kind == "f" for dt in [dtype_x, dtype_y, dtype_z]):
        func = "f" + func

    if use_scalar:
        return get_binary_func_scalar_kernel(func, dtype_x, dtype_y, dtype_z)
    else:
        return get_binary_func_kernel(func, dtype_x, dtype_y, dtype_z)

Author: EelcoHoogendoorn | Project: pycuda | Lines: 12 | Source: elementwise.py
Example 5: has_barrier_within

def has_barrier_within(kernel, sched_index):
    sched_item = kernel.schedule[sched_index]

    if isinstance(sched_item, EnterLoop):
        loop_contents, _ = gather_schedule_subloop(
                kernel.schedule, sched_index)
        from pytools import any
        return any(isinstance(subsched_item, Barrier)
                for subsched_item in loop_contents)
    elif isinstance(sched_item, Barrier):
        return True
    else:
        return False

Author: navjotk | Project: loopy | Lines: 12 | Source: schedule.py
Example 6: get_lpot_applier

def get_lpot_applier(self, kernels):
    # needs to be separate method for caching

    from pytools import any
    if any(knl.is_complex_valued for knl in kernels):
        value_dtype = self.density_discr.complex_dtype
    else:
        value_dtype = self.density_discr.real_dtype

    from sumpy.qbx import LayerPotential
    return LayerPotential(self.cl_context,
            [self.expansion_getter(knl, self.qbx_order)
                for knl in kernels],
            value_dtypes=value_dtype)

Author: sj90101 | Project: pytential | Lines: 14 | Source: __init__.py
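Examples 6, 8, and 9 share one pattern: if any kernel produces complex values, the whole evaluation is promoted to the discretization's complex dtype. A toy sketch of that promotion decision with plain numpy dtypes; FakeKernel and the dtype variables are stand-ins for the pytential objects, not part of any real API:

import numpy as np

class FakeKernel:
    # stand-in for a sumpy kernel; only the flag the examples test
    def __init__(self, is_complex_valued):
        self.is_complex_valued = is_complex_valued

real_dtype = np.dtype(np.float64)
complex_dtype = np.dtype(np.complex128)

kernels = [FakeKernel(False), FakeKernel(True)]

# Promote to the complex dtype as soon as a single kernel is complex-valued.
value_dtype = (complex_dtype
        if any(k.is_complex_valued for k in kernels)
        else real_dtype)
print(value_dtype)  # complex128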
Example 7: __init__

def __init__(self, dtype_out,
        neutral, reduce_expr, arguments=None,
        map_exprs=[None],
        name="reduce_kernel", options=[], preamble=""):

    ctx = get_device().context
    dtype_out = self.dtype_out = np.dtype(dtype_out)

    max_group_size = None
    trip_count = 0

    self.n_exprs = len(map_exprs)
    assert self.n_exprs > 0

    while True:
        self.stage_1_inf = get_reduction_kernel(1, ctx,
                dtype_out,
                neutral, reduce_expr, arguments,
                name=name+"_stage1", options=options, preamble=preamble,
                map_exprs=map_exprs,
                max_group_size=max_group_size)

        kernel_max_wg_size = self.stage_1_inf.kernel.get_work_group_info(
                cl.kernel_work_group_info.WORK_GROUP_SIZE,
                ctx.devices[0])

        if self.stage_1_inf.group_size <= kernel_max_wg_size:
            break
        else:
            max_group_size = kernel_max_wg_size

        trip_count += 1
        assert trip_count <= 2

    self.stage_2_inf = get_reduction_kernel(2, ctx,
            dtype_out,
            neutral, reduce_expr, arguments=arguments,
            name=name+"_stage2", options=options,
            map_exprs=map_exprs,
            preamble=preamble,
            max_group_size=max_group_size)

    from pytools import any
    from pyopencl.tools import VectorArg
    assert any(
            isinstance(arg_tp, VectorArg)
            for arg_tp in self.stage_1_inf.arg_types), \
            "ReductionKernel can only be used with functions " \
            "that have at least one vector argument"

Author: spaghettisort | Project: gputools | Lines: 50 | Source: oclmultireduction.py
Example 8: get_p2p

def get_p2p(self, kernels):
    # needs to be separate method for caching

    from pytools import any
    if any(knl.is_complex_valued for knl in kernels):
        value_dtype = self.density_discr.complex_dtype
    else:
        value_dtype = self.density_discr.real_dtype

    from sumpy.p2p import P2P
    p2p = P2P(self.cl_context,
            kernels, exclude_self=False, value_dtypes=value_dtype)

    return p2p

Author: sj90101 | Project: pytential | Lines: 14 | Source: __init__.py
Example 9: get_lpot_applier_on_tgt_subset

def get_lpot_applier_on_tgt_subset(self, kernels):
    # needs to be separate method for caching

    from pytools import any
    if any(knl.is_complex_valued for knl in kernels):
        value_dtype = self.density_discr.complex_dtype
    else:
        value_dtype = self.density_discr.real_dtype

    from pytential.qbx.direct import LayerPotentialOnTargetAndCenterSubset
    from sumpy.expansion.local import VolumeTaylorLocalExpansion
    return LayerPotentialOnTargetAndCenterSubset(
            self.cl_context,
            [VolumeTaylorLocalExpansion(knl, self.qbx_order)
                for knl in kernels],
            value_dtypes=value_dtype)

Author: inducer | Project: pytential | Lines: 16 | Source: __init__.py
Example 10: finalize_multi_assign

def finalize_multi_assign(self, names, exprs, do_not_return, priority):
    from pytools import any
    from hedge.tools import is_zero

    has_zero_assignees = any(is_zero(expr) for expr in exprs)
    if has_zero_assignees:
        if len(exprs) > 1:
            raise RuntimeError("found aggregated zero constant assignment")

    from hedge.optemplate import FlopCounter
    flop_count = sum(FlopCounter()(expr) for expr in exprs)

    if has_zero_assignees or flop_count == 0:
        return Assign(names, exprs, priority=priority,
                dep_mapper_factory=self.dep_mapper_factory)
    else:
        return VectorExprAssign(
                names=names,
                exprs=exprs,
                do_not_return=do_not_return,
                dep_mapper_factory=self.dep_mapper_factory,
                priority=priority)

Author: paulcazeaux | Project: hedge | Lines: 23 | Source: compiler.py
Example 11: __init__

def __init__(self, ctx, dtype_out,
        neutral, reduce_expr, map_expr=None, arguments=None,
        name="reduce_kernel", options=[], preamble=""):

    dtype_out = self.dtype_out = np.dtype(dtype_out)

    self.stage_1_inf = get_reduction_kernel(ctx,
            dtype_to_ctype(dtype_out), dtype_out.itemsize,
            neutral, reduce_expr, map_expr, arguments,
            name=name+"_stage1", options=options, preamble=preamble)

    # stage 2 has only one input and no map expression
    self.stage_2_inf = get_reduction_kernel(ctx,
            dtype_to_ctype(dtype_out), dtype_out.itemsize,
            neutral, reduce_expr,
            name=name+"_stage2", options=options, preamble=preamble)

    from pytools import any
    from pyopencl.tools import VectorArg
    assert any(
            isinstance(arg_tp, VectorArg)
            for arg_tp in self.stage_1_inf.arg_types), \
            "ReductionKernel can only be used with functions that have " \
            "at least one vector argument"

Author: Almclean | Project: pyopencl | Lines: 24 | Source: reduction.py
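For context, the constructor above is (an older version of) PyOpenCL's ReductionKernel. A typical use is the dot-product pattern from the PyOpenCL documentation, sketched below; both x and y are vector (array) arguments, which is exactly what the assert at the end of the constructor checks for.

import numpy as np
import pyopencl as cl
import pyopencl.array  # makes cl.array available
from pyopencl.reduction import ReductionKernel

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

x = cl.array.arange(queue, 400, dtype=np.float32)
y = cl.array.arange(queue, 400, dtype=np.float32)

# map_expr is applied once per element, reduce_expr folds the results.
dot = ReductionKernel(ctx, np.float32, neutral="0",
        reduce_expr="a+b", map_expr="x[i]*y[i]",
        arguments="__global float *x, __global float *y")

result = dot(x, y).get()
print(result)  # the dot product of x and y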
Example 12: aggregate_assignments

def aggregate_assignments(self, instructions, result):
    from pymbolic.primitives import Variable

    # aggregation helpers -------------------------------------------------
    def get_complete_origins_set(insn, skip_levels=0):
        if skip_levels < 0:
            skip_levels = 0

        result = set()
        for dep in insn.get_dependencies():
            if isinstance(dep, Variable):
                dep_origin = origins_map.get(dep.name, None)
                if dep_origin is not None:
                    if skip_levels <= 0:
                        result.add(dep_origin)
                    result |= get_complete_origins_set(
                            dep_origin, skip_levels-1)

        return result

    var_assignees_cache = {}

    def get_var_assignees(insn):
        try:
            return var_assignees_cache[insn]
        except KeyError:
            result = set(Variable(assignee)
                    for assignee in insn.get_assignees())
            var_assignees_cache[insn] = result
            return result

    def aggregate_two_assignments(ass_1, ass_2):
        names = ass_1.names + ass_2.names

        from pymbolic.primitives import Variable
        deps = (ass_1.get_dependencies() | ass_2.get_dependencies()) \
                - set(Variable(name) for name in names)

        return Assign(
                names=names, exprs=ass_1.exprs + ass_2.exprs,
                _dependencies=deps,
                dep_mapper_factory=self.dep_mapper_factory,
                priority=max(ass_1.priority, ass_2.priority))

    # main aggregation pass -----------------------------------------------
    origins_map = dict(
            (assignee, insn)
            for insn in instructions
            for assignee in insn.get_assignees())

    from pytools import partition
    unprocessed_assigns, other_insns = partition(
            lambda insn: isinstance(insn, Assign),
            instructions)

    # filter out zero-flop-count assigns--no need to bother with those
    processed_assigns, unprocessed_assigns = partition(
            lambda ass: ass.flop_count() == 0,
            unprocessed_assigns)

    # filter out zero assignments
    from pytools import any
    from hedge.tools import is_zero

    i = 0

    while i < len(unprocessed_assigns):
        my_assign = unprocessed_assigns[i]
        if any(is_zero(expr) for expr in my_assign.exprs):
            processed_assigns.append(unprocessed_assigns.pop(i))
        else:
            i += 1

    # greedy aggregation
    while unprocessed_assigns:
        my_assign = unprocessed_assigns.pop()

        my_deps = my_assign.get_dependencies()
        my_assignees = get_var_assignees(my_assign)

        agg_candidates = []

        for i, other_assign in enumerate(unprocessed_assigns):
            other_deps = other_assign.get_dependencies()
            other_assignees = get_var_assignees(other_assign)

            if ((my_deps & other_deps
                    or my_deps & other_assignees
                    or other_deps & my_assignees)
                    and my_assign.priority == other_assign.priority):
                agg_candidates.append((i, other_assign))

        did_work = False

        if agg_candidates:
            my_indirect_origins = get_complete_origins_set(
                    my_assign, skip_levels=1)

            for other_assign_index, other_assign in agg_candidates:
                if self.max_vectors_in_batch_expr is not None:
                    new_assignee_count = len(
                            set(my_assign.get_assignees())
    # ... (remainder of this function omitted) ...

Author: felipeh | Project: hedge | Lines: 101 | Source: compiler.py
Example 13: match_dtype_to_c_struct

def match_dtype_to_c_struct(device, name, dtype, context=None):
    """Return a tuple `(dtype, c_decl)` such that the C struct declaration
    in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
    have the same memory layout.

    Note that *dtype* may be modified from the value that was passed in,
    for example to insert padding.

    (As a remark on implementation, this routine runs a small kernel on
    the given *device* to ensure that :mod:`numpy` and C offsets and
    sizes match.)

    .. versionadded: 2013.1

    This example explains the use of this function::

        >>> import numpy as np
        >>> import pyopencl as cl
        >>> import pyopencl.tools
        >>> ctx = cl.create_some_context()
        >>> dtype = np.dtype([("id", np.uint32), ("value", np.float32)])
        >>> dtype, c_decl = pyopencl.tools.match_dtype_to_c_struct(
        ...     ctx.devices[0], 'id_val', dtype)
        >>> print c_decl
        typedef struct {
          unsigned id;
          float value;
        } id_val;
        >>> print dtype
        [('id', '<u4'), ('value', '<f4')]
        >>> cl.tools.get_or_register_dtype('id_val', dtype)

    As this example shows, it is important to call
    :func:`get_or_register_dtype` on the modified `dtype` returned by this
    function, not the original one.
    """

    fields = sorted(dtype.fields.iteritems(),
            key=lambda (name, (dtype, offset)): offset)

    c_fields = []
    for field_name, (field_dtype, offset) in fields:
        c_fields.append("  %s %s;" % (dtype_to_ctype(field_dtype), field_name))

    c_decl = "typedef struct {\n%s\n} %s;\n\n" % (
            "\n".join(c_fields),
            name)

    cdl = _CDeclList(device)
    for field_name, (field_dtype, offset) in fields:
        cdl.add_dtype(field_dtype)

    pre_decls = cdl.get_declarations()

    offset_code = "\n".join(
            "result[%d] = pycl_offsetof(%s, %s);" % (i+1, name, field_name)
            for i, (field_name, (field_dtype, offset)) in enumerate(fields))

    src = r"""
        #define pycl_offsetof(st, m) \
                ((size_t) ((__local char *) &(dummy.m) \
                - (__local char *)&dummy ))

        %(pre_decls)s

        %(my_decl)s

        __kernel void get_size_and_offsets(__global size_t *result)
        {
            result[0] = sizeof(%(my_type)s);
            __local %(my_type)s dummy;
            %(offset_code)s
        }
    """ % dict(
            pre_decls=pre_decls,
            my_decl=c_decl,
            my_type=name,
            offset_code=offset_code)

    if context is None:
        context = cl.Context([device])

    queue = cl.CommandQueue(context)

    prg = cl.Program(context, src)
    knl = prg.build(devices=[device]).get_size_and_offsets

    import pyopencl.array  # noqa
    result_buf = cl.array.empty(queue, 1+len(fields), np.uintp)
    knl(queue, (1,), (1,), result_buf.data)
    queue.finish()
    size_and_offsets = result_buf.get()

    size = int(size_and_offsets[0])

    from pytools import any
    offsets = size_and_offsets[1:]
    if any(ofs >= size for ofs in offsets):
        # offsets not plausible
        # ... (remainder of this function omitted) ...

Author: DirkHaehnel | Project: pyopencl | Lines: 101 | Source: tools.py
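The final check above asks whether any device-reported field offset is at least as large as the struct size, which would mean the C layout and numpy's layout disagree. The numpy side of that comparison needs no OpenCL at all; a small pure-numpy sketch of the same plausibility test:

import numpy as np

dtype = np.dtype([("id", np.uint32), ("value", np.float32)])

# numpy's view of the layout: per-field offsets and the total struct size
offsets = [offset for _, (field_dtype, offset) in sorted(
        dtype.fields.items(), key=lambda item: item[1][1])]
size = dtype.itemsize

# Every field must start strictly inside the struct.
assert not any(ofs >= size for ofs in offsets)
print(offsets, size)  # [0, 4] 8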
Example 14: do_not_vectorize

def do_not_vectorize(self):
    from pytools import any
    return (self.complex_kernel
            and any(dev.type == cl.device_type.CPU
                for dev in self.context.devices))

Author: braincorp | Project: pyopencl | Lines: 4 | Source: algorithm.py
Example 15: is_name_conflicting

def is_name_conflicting(self, name):
    from pytools import any
    return any(
            _is_var_name_conflicting(name, other_name)
            for other_name in self.existing_names)

Author: cmsquared | Project: loopy | Lines: 5 | Source: __init__.py
Example 16: map_logical_or

def map_logical_or(self, expr):
    from pytools import any
    return any(self.rec(ch) for ch in expr.children)

Author: FInAT | Project: pymbolic | Lines: 3 | Source: evaluator.py
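A detail worth noting in map_logical_or: because any is fed a generator expression, evaluation short-circuits, so once one child evaluates to true the remaining children are never visited by self.rec. The builtin behaves the same way, which this tiny sketch demonstrates:

def noisy(x):
    print("evaluating", x)
    return x

# Only the first two elements are evaluated; any() stops at the first truthy one.
print(any(noisy(x) for x in [False, True, False]))
# evaluating False
# evaluating True
# True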
Example 17: __call__

def __call__(self, queue, n_objects, *args, **kwargs):
    """
    :arg args: arguments corresponding to arg_decls in the constructor.
        :class:`pyopencl.array.Array` objects are not allowed directly and
        should be passed as their :attr:`pyopencl.array.Array.data`
        attribute instead.
    :arg allocator: optionally, the allocator to use to allocate new
        arrays.
    :arg omit_lists: An iterable of list names that should *not* be built
        with this invocation. The kernel code may *not* call ``APPEND_name``
        for these omitted lists. If it does, undefined behavior will result.
        The returned *lists* dictionary will not contain an entry for names
        in *omit_lists*.
    :arg wait_for: |explain-waitfor|
    :returns: a tuple ``(lists, event)``, where
        *lists* is a mapping from (built) list names to objects which
        have attributes

        * ``count`` for the total number of entries in all lists combined
        * ``lists`` for the array containing all lists.
        * ``starts`` for the array of starting indices in `lists`.
          `starts` is built so that it has n+1 entries, so that
          the *i*'th entry is the start of the *i*'th list, and the
          (*i*+1)'th entry is the index one past the *i*'th list's end,
          even for the last list.

          This implies that all lists are contiguous.

        *event* is a :class:`pyopencl.Event` for dependency management.

    .. versionchanged:: 2016.2

        Added omit_lists.
    """
    if n_objects >= int(np.iinfo(np.int32).max):
        index_dtype = np.int64
    else:
        index_dtype = np.int32
    index_dtype = np.dtype(index_dtype)

    allocator = kwargs.pop("allocator", None)
    omit_lists = kwargs.pop("omit_lists", [])
    wait_for = kwargs.pop("wait_for", None)
    if kwargs:
        raise TypeError("invalid keyword arguments: '%s'" % ", ".join(kwargs))

    for l in omit_lists:
        if not any(l == name for name, _ in self.list_names_and_dtypes):
            raise ValueError("invalid list name '%s' in omit_lists" % l)

    result = {}
    count_list_args = []

    if wait_for is None:
        wait_for = []

    count_kernel = self.get_count_kernel(index_dtype)
    write_kernel = self.get_write_kernel(index_dtype)
    scan_kernel = self.get_scan_kernel(index_dtype)

    # {{{ allocate memory for counts

    for name, dtype in self.list_names_and_dtypes:
        if name in self.count_sharing:
            continue
        if name in omit_lists:
            count_list_args.append(None)
            continue

        counts = cl.array.empty(queue,
                (n_objects + 1), index_dtype, allocator=allocator)
        counts[-1] = 0
        wait_for = wait_for + counts.events

        # The scan will turn the "counts" array into the "starts" array
        # in-place.
        result[name] = BuiltList(starts=counts)
        count_list_args.append(counts.data)

    # }}}

    if self.debug:
        gsize = (1,)
        lsize = (1,)
    elif self.complex_kernel and queue.device.type == cl.device_type.CPU:
        gsize = (4*queue.device.max_compute_units,)
        lsize = (1,)
    else:
        from pyopencl.array import splay
        gsize, lsize = splay(queue, n_objects)

    count_event = count_kernel(queue, gsize, lsize,
            *(tuple(count_list_args) + args + (n_objects,)),
            **dict(wait_for=wait_for))

    # {{{ run scans

    scan_events = []

    for name, dtype in self.list_names_and_dtypes:
        if name in self.count_sharing:
            # ... (remainder of this function omitted) ...

Author: hrfuller | Project: pyopencl | Lines: 101 | Source: algorithm.py
Example 18: __init__

    # ... (beginning of this method, including the def line and the start of
    # the docstring, omitted) ...

    :arg generate_template: a snippet of C as described below
    :arg arg_decls: A string of comma-separated C argument declarations.
    :arg count_sharing: A mapping consisting of `(child, mother)`
        indicating that `mother` and `child` will always have the
        same number of indices, and the `APPEND` to `mother`
        will always happen *before* the `APPEND` to the child.
    :arg name_prefix: the name prefix to use for the compiled kernels
    :arg options: OpenCL compilation options for kernels using
        *generate_template*.
    :arg complex_kernel: If `True`, prevents vectorization on CPUs.
    :arg eliminate_empty_output_lists: A Python list of list names
        for which the empty output lists are eliminated.

    *generate_template* may use the following C macros/identifiers:

    * `index_type`: expands to the C identifier for the index type used
      for the calculation
    * `USER_ARG_DECL`: expands to the C declarator for `arg_decls`
    * `USER_ARGS`: a list of C argument values corresponding to
      `user_arg_decl`
    * `LIST_ARG_DECL`: expands to a C argument list representing the
      data for the output lists. These are prefixed with `"plg_"`
      so as to not interfere with user-provided names.
    * `LIST_ARGS`: a list of C argument values corresponding to
      `LIST_ARG_DECL`
    * `APPEND_name(entry)`: inserts `entry` into the list `name`.
      *entry* must be a valid C expression of the correct type.

    All argument-list related macros have a trailing comma included
    if they are non-empty.

    *generate_template* must supply a function:

    .. code-block:: c

        void generate(USER_ARG_DECL LIST_ARG_DECL index_type i)
        {
            APPEND_mylist(5);
        }

    Internally, the `kernel_template` is expanded (at least) twice. Once,
    for a 'counting' stage where the size of all the lists is determined,
    and a second time, for a 'generation' stage where the lists are
    actually filled. A `generate` function that has side effects beyond
    calling `append` is therefore ill-formed.

    .. versionchanged:: 2018.1

        Change *eliminate_empty_output_lists* argument type from `bool` to
        `list`.
    """
    if devices is None:
        devices = context.devices

    if count_sharing is None:
        count_sharing = {}

    self.context = context
    self.devices = devices

    self.list_names_and_dtypes = list_names_and_dtypes
    self.generate_template = generate_template

    from pyopencl.tools import parse_arg_list
    self.arg_decls = parse_arg_list(arg_decls)

    # To match with the signature of the user-supplied generate(),
    # arguments can't appear to have offsets.
    arg_decls_no_offset = []
    from pyopencl.tools import VectorArg
    for arg in self.arg_decls:
        if isinstance(arg, VectorArg) and arg.with_offset:
            arg = VectorArg(arg.dtype, arg.name)
        arg_decls_no_offset.append(arg)
    self.arg_decls_no_offset = arg_decls_no_offset

    self.count_sharing = count_sharing

    self.name_prefix = name_prefix
    self.preamble = preamble
    self.options = options

    self.debug = debug

    self.complex_kernel = complex_kernel

    if eliminate_empty_output_lists is True:
        eliminate_empty_output_lists = \
                [name for name, _ in self.list_names_and_dtypes]

    if eliminate_empty_output_lists is False:
        eliminate_empty_output_lists = []

    self.eliminate_empty_output_lists = eliminate_empty_output_lists
    for list_name in self.eliminate_empty_output_lists:
        if not any(
                list_name == name
                for name, _ in self.list_names_and_dtypes):
            raise ValueError(
                    "invalid list name '%s' in eliminate_empty_output_lists"
                    % list_name)

Author: inducer | Project: pyopencl | Lines: 101 | Source: algorithm.py
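Example 18 is the constructor of what appears to be pyopencl.algorithm.ListOfListsBuilder (the source file and parameter names match). Assuming that identification is correct, here is a rough usage sketch in the spirit of the docstring above; the generate() signature follows the docstring's `USER_ARG_DECL LIST_ARG_DECL index_type i` convention, and the details (list name, sizes) are illustrative rather than authoritative:

import numpy as np
import pyopencl as cl
from pyopencl.algorithm import ListOfListsBuilder

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# For object i, append the value (i % 4) that many times to "mylist".
builder = ListOfListsBuilder(ctx,
        [("mylist", np.int32)],
        """
        void generate(USER_ARG_DECL LIST_ARG_DECL index_type i)
        {
            int count = i % 4;
            for (int j = 0; j < count; ++j)
                APPEND_mylist(count);
        }
        """,
        arg_decls=[])

result, evt = builder(queue, 2000)
mylist = result["mylist"]
print(mylist.count)              # total number of appended entries
print(mylist.starts.get()[:5])   # per-object start indices (see docstring)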
Example 19: __call__

def __call__(self, queue, n_objects, *args, **kwargs):
    """
    :arg args: arguments corresponding to arg_decls in the constructor.
        Array-like arguments must be either
        1D :class:`pyopencl.array.Array` objects or
        :class:`pyopencl.MemoryObject` objects, of which the latter
        can be obtained from a :class:`pyopencl.array.Array` using the
        :attr:`pyopencl.array.Array.data` attribute.
    :arg allocator: optionally, the allocator to use to allocate new
        arrays.
    :arg omit_lists: An iterable of list names that should *not* be built
        with this invocation. The kernel code may *not* call ``APPEND_name``
        for these omitted lists. If it does, undefined behavior will result.
        The returned *lists* dictionary will not contain an entry for names
        in *omit_lists*.
    :arg wait_for: |explain-waitfor|
    :returns: a tuple ``(lists, event)``, where
        *lists* is a mapping from (built) list names to objects which
        have attributes

        * ``count`` for the total number of entries in all lists combined
        * ``lists`` for the array containing all lists.
        * ``starts`` for the array of starting indices in `lists`.
          `starts` is built so that it has n+1 entries, so that
          the *i*'th entry is the start of the *i*'th list, and the
          (*i*+1)'th entry is the index one past the *i*'th list's end,
          even for the last list.

          This implies that all lists are contiguous.

        If the list name is specified in the *eliminate_empty_output_lists*
        constructor argument, *lists* has two additional attributes,
        ``num_nonempty_lists`` and ``nonempty_indices``:

        * ``num_nonempty_lists`` for the number of nonempty lists.
        * ``nonempty_indices`` for the indices of the nonempty lists among
          the input objects.

        In this case, `starts` has `num_nonempty_lists` + 1 entries. The
        *i*'th entry is the start of the *i*'th nonempty list, which is
        generated by the object with index *nonempty_indices[i]*.

        *event* is a :class:`pyopencl.Event` for dependency management.

    .. versionchanged:: 2016.2

        Added omit_lists.
    """
    if n_objects >= int(np.iinfo(np.int32).max):
        index_dtype = np.int64
    else:
        index_dtype = np.int32
    index_dtype = np.dtype(index_dtype)

    allocator = kwargs.pop("allocator", None)
    omit_lists = kwargs.pop("omit_lists", [])
    wait_for = kwargs.pop("wait_for", None)
    if kwargs:
        raise TypeError("invalid keyword arguments: '%s'" % ", ".join(kwargs))

    for oml in omit_lists:
        if not any(oml == name for name, _ in self.list_names_and_dtypes):
            raise ValueError("invalid list name '%s' in omit_lists" % oml)

    result = {}
    count_list_args = []

    if wait_for is None:
        wait_for = []

    count_kernel = self.get_count_kernel(index_dtype)
    write_kernel = self.get_write_kernel(index_dtype)
    scan_kernel = self.get_scan_kernel(index_dtype)
    if self.eliminate_empty_output_lists:
        compress_kernel = self.get_compress_kernel(index_dtype)

    data_args = []
    for i, (arg_descr, arg_val) in enumerate(zip(self.arg_decls, args)):
        from pyopencl.tools import VectorArg
        if isinstance(arg_descr, VectorArg):
            from pyopencl import MemoryObject
            if isinstance(arg_val, MemoryObject):
                data_args.append(arg_val)
                if arg_descr.with_offset:
                    raise ValueError(
                            "with_offset=True specified for argument %d "
                            "but the argument is not an array" % i)
                continue

            if arg_val.ndim != 1:
                raise ValueError("argument %d is a multidimensional array" % i)

            data_args.append(arg_val.base_data)
            if arg_descr.with_offset:
                data_args.append(arg_val.offset)
        else:
            data_args.append(arg_val)

    del args
    data_args = tuple(data_args)
    # ... (remainder of this function omitted) ...

Author: inducer | Project: pyopencl | Lines: 101 | Source: algorithm.py
Note: the pytools.any examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before redistributing or reusing the code. Do not republish without permission.