本文整理汇总了Python中pytools.single_valued函数的典型用法代码示例。如果您正苦于以下问题:Python single_valued函数的具体用法?Python single_valued怎么用?Python single_valued使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了single_valued函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _bmat
def _bmat(blocks, dtypes):
    """Assemble a dense matrix from a 2D grid of (possibly zero) blocks.

    :arg blocks: a 2D object array of matrix blocks; zero blocks are
        detected with :func:`pytential.symbolic.matrix.is_zero`.
    :arg dtypes: dtypes to promote into the result's dtype.
    :returns: a dense :class:`numpy.ndarray` containing all blocks.
    """
    from pytools import single_valued
    from pytential.symbolic.matrix import is_zero

    nrows = blocks.shape[0]
    ncolumns = blocks.shape[1]

    # "block row starts"/"block column starts": cumulative offsets of each
    # block row/column in the assembled matrix.  All non-zero blocks in a
    # given row (column) must agree on their row (column) count.
    brs = np.cumsum([0]
            + [single_valued(blocks[ibrow, ibcol].shape[0]
                             for ibcol in range(ncolumns)
                             if not is_zero(blocks[ibrow, ibcol]))
               for ibrow in range(nrows)])

    bcs = np.cumsum([0]
            + [single_valued(blocks[ibrow, ibcol].shape[1]
                             for ibrow in range(nrows)
                             if not is_zero(blocks[ibrow, ibcol]))
               for ibcol in range(ncolumns)])

    # BUG FIX: np.find_common_type was deprecated in NumPy 1.25 and removed
    # in NumPy 2.0; np.result_type performs the same dtype promotion here.
    result = np.zeros((brs[-1], bcs[-1]), dtype=np.result_type(*dtypes))

    for ibcol in range(ncolumns):
        for ibrow in range(nrows):
            result[brs[ibrow]:brs[ibrow + 1], bcs[ibcol]:bcs[ibcol + 1]] = \
                    blocks[ibrow, ibcol]

    return result
开发者ID:inducer,项目名称:pytential,代码行数:28,代码来源:execution.py
示例2: map_ref_diff_op_binding
def map_ref_diff_op_binding(self, expr):
    """Map a reference differentiation operator binding to a code variable,
    batching all differentiations of the same field (differing only in
    axis) into a single batch-assignment instruction.
    """
    try:
        return self.expr_to_var[expr]
    except KeyError:
        # batch together all diffs of this field along the other axes
        all_diffs = [diff
                for diff in self.diff_ops
                if diff.op.equal_except_for_axis(expr.op)
                and diff.field == expr.field]

        names = [self.get_var_name() for d in all_diffs]

        from pytools import single_valued
        # all operators in the batch must be of one operator type
        op_class = single_valued(type(d.op) for d in all_diffs)

        from hedge.optemplate.operators import \
                ReferenceQuadratureStiffnessTOperator
        # BUG FIX: op_class is a *class object* (from type(d.op)), so the
        # original ``isinstance(op_class, ...)`` test was always False and
        # the quadrature branch was unreachable; use issubclass instead.
        if issubclass(op_class, ReferenceQuadratureStiffnessTOperator):
            assign_class = QuadratureDiffBatchAssign
        else:
            assign_class = DiffBatchAssign

        self.code.append(
                assign_class(
                    names=names,
                    op_class=op_class,
                    operators=[d.op for d in all_diffs],
                    field=self.rec(
                        single_valued(d.field for d in all_diffs)),
                    dep_mapper_factory=self.dep_mapper_factory))

        from pymbolic import var
        for n, d in zip(names, all_diffs):
            self.expr_to_var[d] = var(n)

        return self.expr_to_var[expr]
开发者ID:felipeh,项目名称:hedge,代码行数:35,代码来源:compiler.py
示例3: make_superblocks
def make_superblocks(devdata, struct_name, single_item, multi_item, extra_fields=None):
    """Concatenate per-superblock data parts into one padded device buffer,
    along with a C struct declaration describing one superblock.

    :arg devdata: device data/alignment helper (provides ``align``).
    :arg struct_name: name of the generated C struct.
    :arg single_item: ``[([block1, block2, ...], decl), ...]`` -- parts with
        exactly one data string per superblock.
    :arg multi_item: ``[([[item1, item2, ...], ...], decl), ...]`` -- parts
        whose per-superblock data is a list of items; each part is padded
        to the length of its longest member.
    :arg extra_fields: optional mapping of extra attributes to attach to the
        returned record.
    :returns: a record with ``struct``, ``device_memory``, ``block_bytes``
        and ``data`` attributes.
    """
    # BUG FIX: avoid the shared mutable default argument ``extra_fields={}``
    if extra_fields is None:
        extra_fields = {}

    from hedge.backends.cuda.tools import pad_and_join

    # single_item = [([ block1, block2, ... ], decl), ...]
    # multi_item = [([ [ item1, item2, ...], ... ], decl), ...]

    multi_blocks = [
            ["".join(s) for s in part_data]
            for part_data, part_decls in multi_item]
    # padded per-superblock size of each multi-item part
    block_sizes = [
            max(len(b) for b in part_blocks)
            for part_blocks in multi_blocks]

    from pytools import single_valued
    # every single-item part must cover the same number of superblocks
    block_count = single_valued(
            len(si_part_blocks) for si_part_blocks, si_part_decl in single_item)

    from cgen import Struct, ArrayOf

    struct_members = []
    for part_data, part_decl in single_item:
        assert block_count == len(part_data)
        # all blocks within a single-item part must have one size
        single_valued(len(block) for block in part_data)
        struct_members.append(part_decl)

    for part_data, part_decl in multi_item:
        struct_members.append(
                ArrayOf(part_decl, max(len(s) for s in part_data)))

    superblocks = []
    for superblock_num in range(block_count):
        data = ""
        for part_data, part_decl in single_item:
            data += part_data[superblock_num]

        for part_blocks, part_size in zip(multi_blocks, block_sizes):
            assert block_count == len(part_blocks)
            data += pad(part_blocks[superblock_num], part_size)

        superblocks.append(data)

    superblock_size = devdata.align(
            single_valued(len(sb) for sb in superblocks))

    data = pad_and_join(superblocks, superblock_size)
    assert len(data) == superblock_size*block_count

    class SuperblockedDataStructure(Record):
        pass

    return SuperblockedDataStructure(
            struct=Struct(struct_name, struct_members),
            device_memory=cuda.to_device(data),
            block_bytes=superblock_size,
            data=data,
            **extra_fields
            )
开发者ID:paulcazeaux,项目名称:hedge,代码行数:57,代码来源:tools.py
示例4: multi_put
def multi_put(arrays, dest_indices, dest_shape=None, out=None, queue=None):
    """Scatter each array in *arrays* into a (new or given) output array at
    the positions given by *dest_indices*.

    :arg arrays: a sequence of device arrays, all of one dtype.
    :arg dest_indices: a 1D device index array; element *i* of each input is
        written to position ``dest_indices[i]`` of the matching output.
    :arg dest_shape: shape of newly allocated outputs when *out* is None.
    :arg out: optional list of pre-allocated outputs, one per input array.
    :arg queue: command queue; defaults to the queue of *dest_indices*.
    :returns: the list of output arrays (*out* itself if it was given).
    """
    if not len(arrays):
        return []

    from pytools import single_valued
    # all inputs must share a single dtype
    a_dtype = single_valued(a.dtype for a in arrays)
    a_allocator = arrays[0].allocator
    context = dest_indices.context
    queue = queue or dest_indices.queue

    vec_count = len(arrays)

    if out is None:
        out = [Array(context, dest_shape, a_dtype, allocator=a_allocator, queue=queue) for i in range(vec_count)]
    else:
        if a_dtype != single_valued(o.dtype for o in out):
            raise TypeError("arrays and out must have the same dtype")
        if len(out) != vec_count:
            raise ValueError("out and arrays must have the same length")

    if len(dest_indices.shape) != 1:
        raise ValueError("src_indices must be 1D")

    # process at most 10 arrays per kernel launch; the put kernel is
    # specialized for the exact chunk size
    chunk_size = _builtin_min(vec_count, 10)

    def make_func_for_chunk_size(chunk_size):
        knl = elementwise.get_put_kernel(a_dtype, dest_indices.dtype, vec_count=chunk_size)
        knl.set_block_shape(*dest_indices._block)
        return knl

    knl = make_func_for_chunk_size(chunk_size)

    for start_i in range(0, len(arrays), chunk_size):
        chunk_slice = slice(start_i, start_i + chunk_size)

        # the last chunk may be smaller -- rebuild the kernel for it
        if start_i + chunk_size > vec_count:
            knl = make_func_for_chunk_size(vec_count - start_i)

        gs, ls = dest_indices.get_sizes(
            queue, knl.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, queue.device)
        )
        knl(
            queue,
            gs,
            ls,
            *(
                [o.data for o in out[chunk_slice]]
                + [dest_indices.data]
                + [i.data for i in arrays[chunk_slice]]
                + [dest_indices.size]
            )
        )

    return out
开发者ID:EyNuel,项目名称:pyopencl,代码行数:56,代码来源:array.py
示例5: __call__
def __call__(self, evaluate_subexpr, stats_callback=None):
    """Evaluate the compiled vector expression on the GPU.

    :arg evaluate_subexpr: callback mapping a dependency expression to its
        concrete (GPU array or scalar) value.
    :arg stats_callback: optional; called as ``stats_callback(size, self,
        timed_call_result)`` with the result of a timed kernel invocation.
    :returns: a list with one GPU array per result expression.
    """
    # gather concrete values for all vector and scalar dependencies
    vectors = [evaluate_subexpr(vec_expr)
            for vec_expr in self.vector_deps]
    scalars = [evaluate_subexpr(scal_expr)
            for scal_expr in self.scalar_deps]

    from pytools import single_valued
    # all dependency vectors must share one shape
    shape = single_valued(vec.shape for vec in vectors)

    kernel_rec = self.get_kernel(
            tuple(v.dtype for v in vectors),
            tuple(s.dtype for s in scalars))

    # one freshly allocated GPU result array per result expression
    results = [gpuarray.empty(
        shape, kernel_rec.result_dtype, self.allocator)
        for expr in self.result_vec_expr_info_list]

    size = results[0].size
    args = ([r.gpudata for r in results]
            + [v.gpudata for v in vectors]
            + scalars
            + [size])

    if stats_callback is not None:
        # timed call so the callback can record timing statistics
        stats_callback(size, self,
                kernel_rec.kernel.prepared_timed_call(vectors[0]._grid, results[0]._block, *args))
    else:
        kernel_rec.kernel.prepared_async_call(vectors[0]._grid, results[0]._block, self.stream, *args)

    return results
开发者ID:gimac,项目名称:hedge,代码行数:30,代码来源:vector_expr.py
示例6: exec_diff_batch_assign
def exec_diff_batch_assign(self, insn):
    """Execute a batch of reference-element differentiation assignments.

    :arg insn: instruction carrying ``operators`` (one per output name) and
        the ``field`` expression to differentiate.
    :returns: ``(assignments, [])`` where *assignments* pairs each output
        name with the derivative along that operator's rst axis.
    """
    field = self.rec(insn.field)
    discr = self.executor.discr

    if discr.instrumented:
        # flop accounting: one differentiation per dimension
        discr.diff_counter.add(discr.dimensions)
        discr.diff_flop_counter.add(discr.dimensions*(
            self.executor.diff_rst_flops + self.executor.diff_rescale_one_flops))

    # operators in the batch differ only by axis; the first one is used as
    # a representative to select the kernel
    repr_op = insn.operators[0]

    from hedge.optemplate.operators import \
            ReferenceQuadratureStiffnessTOperator

    if isinstance(repr_op, ReferenceQuadratureStiffnessTOperator):
        # quadrature path: exactly one element group is expected here
        eg, = discr.element_groups

        from pytools import single_valued
        # all operators in the batch must share one quadrature tag
        q_info = discr.get_cuda_elgroup_quadrature_info(
                eg, single_valued(op.quadrature_tag for op in insn.operators))

        kernel = discr.diff_kernel(
            aligned_preimage_dofs_per_microblock
            =q_info.aligned_dofs_per_microblock,
            preimage_dofs_per_el=q_info.ldis_quad_info.node_count())

        rst_diff = kernel(repr_op, field)
    else:
        rst_diff = self.executor.diff_kernel(repr_op, field)

    return [(name, rst_diff[op.rst_axis])
            for name, op in zip(insn.names, insn.operators)], []
开发者ID:felipeh,项目名称:hedge,代码行数:30,代码来源:execute.py
示例7: index_list_backend
def index_list_backend(self, ilists):
    """Pack the index lists into one flat on-device array.

    Every index list must have the same length (one entry per face DOF).
    The entry type is the narrowest unsigned integer type that holds the
    largest index value.
    """
    from pytools import single_valued

    ilist_length = single_valued(len(il) for il in ilists)
    assert ilist_length == self.plan.dofs_per_face

    from cgen import Typedef, POD
    from pytools import flatten

    flat_ilists_uncast = numpy.array(list(flatten(ilists)))

    # choose the narrowest index type that can represent every entry
    tp = numpy.uint16 if numpy.max(flat_ilists_uncast) >= 256 else numpy.uint8

    flat_ilists = numpy.asarray(flat_ilists_uncast, dtype=tp)
    # guard against silent overflow in the narrowing cast
    assert (flat_ilists == flat_ilists_uncast).all()

    return GPUIndexLists(
            type=tp,
            code=[Typedef(POD(tp, "index_list_entry_t"))],
            device_memory=cuda.to_device(flat_ilists),
            bytes=flat_ilists.size * flat_ilists.itemsize,
            )
开发者ID:gimac,项目名称:hedge,代码行数:26,代码来源:fluxgather.py
示例8: nd_quad_submesh
def nd_quad_submesh(node_tuples):
    """Return a list of index tuples (into *node_tuples*) that tesselate
    the reference element with quad/hex cells.

    :arg node_tuples: a list of integer tuples *(i, j, ...)* giving node
        positions inside the unit element; the returned tuples reference
        positions in this list.
    :func:`pytools.generate_nonnegative_integer_tuples_below`
    may be used to generate *node_tuples*.
    See also :func:`modepy.tools.simplex_submesh`.
    """
    from pytools import single_valued, add_tuples
    from pytools import generate_nonnegative_integer_tuples_below as gnitb

    # all node tuples must have one dimensionality
    dims = single_valued(len(nt) for nt in node_tuples)

    node_dict = {ituple: idx for idx, ituple in enumerate(node_tuples)}

    result = []
    for origin in node_tuples:
        # attempt to form the cell whose "lowest" corner sits at *origin*;
        # cells poking out of the node set are silently skipped
        try:
            cell = tuple(node_dict[add_tuples(origin, offset)]
                    for offset in gnitb(2, dims))
        except KeyError:
            continue

        result.append(cell)

    return result
开发者ID:mattwala,项目名称:meshmode,代码行数:34,代码来源:tools.py
示例9: __call__
def __call__(self, evaluate_subexpr, stats_callback=None):
    """Evaluate the compiled vector expression using numpy.

    :arg evaluate_subexpr: callback mapping a dependency expression to its
        concrete value.
    :arg stats_callback: optional; called as ``stats_callback(size, self)``
        and expected to return a timer whose sub-timer brackets the kernel
        execution.
    :returns: a list with one numpy array per result expression.
    """
    # gather concrete values for all vector and scalar dependencies
    vectors = [evaluate_subexpr(vec_expr)
            for vec_expr in self.vector_deps]
    scalars = [evaluate_subexpr(scal_expr)
            for scal_expr in self.scalar_deps]

    from pytools import single_valued
    # all dependency vectors must share one shape
    shape = single_valued(vec.shape for vec in vectors)

    kernel_rec = self.get_kernel(
            tuple(v.dtype for v in vectors),
            tuple(s.dtype for s in scalars))

    # one output array per result expression
    results = [numpy.empty(shape, kernel_rec.result_dtype)
            for vei in self.result_vec_expr_info_list]

    size = results[0].size
    args = (results+vectors+scalars)

    if stats_callback is not None:
        timer = stats_callback(size, self)
        sub_timer = timer.start_sub_timer()
        kernel_rec.kernel(*args)
        sub_timer.stop().submit()
    else:
        kernel_rec.kernel(*args)

    return results
开发者ID:allansnielsen,项目名称:hedge,代码行数:28,代码来源:vector_expr.py
示例10: _vis_connectivity
def _vis_connectivity(self):
    """
    :return: an array of shape
        ``(vis_discr.nelements,nsubelements,primitive_element_size)``
    """
    # Assume that we're using modepy's default node ordering.

    from pytools import generate_nonnegative_integer_tuples_summing_to_at_most \
            as gnitstam, single_valued

    # all visualization groups must share one polynomial order
    vis_order = single_valued(
            group.order for group in self.vis_discr.groups)
    node_tuples = list(gnitstam(vis_order, self.vis_discr.dim))

    from modepy.tools import submesh
    # connectivity of one reference element's sub-tesselation, expressed
    # in unit-node indices
    el_connectivity = np.array(
            submesh(node_tuples),
            dtype=np.intp)

    nelements = sum(group.nelements for group in self.vis_discr.groups)
    vis_connectivity = np.empty(
            (nelements,) + el_connectivity.shape, dtype=np.intp)

    el_nr_base = 0
    for group in self.vis_discr.groups:
        assert len(node_tuples) == group.nunit_nodes

        # shift the reference connectivity by each element's global node
        # offset; nodes are numbered contiguously per element, so the
        # offsets step by nunit_nodes
        vis_connectivity[el_nr_base:el_nr_base+group.nelements] = (
                np.arange(
                    el_nr_base*group.nunit_nodes,
                    (el_nr_base+group.nelements)*group.nunit_nodes,
                    group.nunit_nodes
                    )[:, np.newaxis, np.newaxis]
                + el_connectivity)

        el_nr_base += group.nelements

    return vis_connectivity
开发者ID:mattwala,项目名称:meshmode,代码行数:35,代码来源:visualization.py
示例11: multi_put
def multi_put(arrays, dest_indices, dest_shape=None, out=None, stream=None):
    """Scatter each GPU array in *arrays* into an output array at the
    positions given by *dest_indices*.

    :arg arrays: a sequence of :class:`GPUArray` instances, all of one dtype.
    :arg dest_indices: a 1D device index array; element *i* of each input is
        written to position ``dest_indices[i]`` of the matching output.
    :arg dest_shape: shape of newly allocated outputs when *out* is None.
    :arg out: optional list of pre-allocated outputs, one per input array.
    :arg stream: optional CUDA stream for asynchronous execution.
    :returns: the list of output arrays (*out* itself if it was given).
    """
    if not len(arrays):
        return []

    from pytools import single_valued
    # all inputs must share a single dtype
    a_dtype = single_valued(a.dtype for a in arrays)
    a_allocator = arrays[0].allocator

    vec_count = len(arrays)

    if out is None:
        out = [GPUArray(dest_shape, a_dtype, a_allocator)
                for i in range(vec_count)]
    else:
        if a_dtype != single_valued(o.dtype for o in out):
            raise TypeError("arrays and out must have the same dtype")
        if len(out) != vec_count:
            raise ValueError("out and arrays must have the same length")

    if len(dest_indices.shape) != 1:
        raise ValueError("src_indices must be 1D")

    # process at most 10 arrays per kernel launch; the put kernel is
    # specialized for the exact chunk size
    chunk_size = _builtin_min(vec_count, 10)

    def make_func_for_chunk_size(chunk_size):
        func = elementwise.get_put_kernel(
                a_dtype, dest_indices.dtype, vec_count=chunk_size)
        func.set_block_shape(*dest_indices._block)
        return func

    func = make_func_for_chunk_size(chunk_size)

    for start_i in range(0, len(arrays), chunk_size):
        chunk_slice = slice(start_i, start_i+chunk_size)

        # the last chunk may be smaller -- respecialize for the exact count
        if start_i + chunk_size > vec_count:
            func = make_func_for_chunk_size(vec_count-start_i)

        func.prepared_async_call(dest_indices._grid, stream,
                dest_indices.gpudata,
                *([o.gpudata for o in out[chunk_slice]]
                    + [i.gpudata for i in arrays[chunk_slice]]
                    + [dest_indices.size]))

    return out
开发者ID:minrk,项目名称:PyCUDA,代码行数:45,代码来源:gpuarray.py
示例12: make_flux_batch_assign
def make_flux_batch_assign(self, names, expressions, repr_op):
    """Create a :class:`CUDAFluxBatchAssign` for a batch of flux
    expressions, which must all share a single quadrature tag.
    """
    from pytools import single_valued

    # every flux in the batch must agree on its quadrature tag
    quad_tag = single_valued(
            wdflux.quadrature_tag for wdflux in expressions)

    return CUDAFluxBatchAssign(
            names=names,
            expressions=expressions,
            repr_op=repr_op,
            dep_mapper_factory=self.dep_mapper_factory,
            quadrature_tag=quad_tag)
开发者ID:felipeh,项目名称:hedge,代码行数:9,代码来源:execute.py
示例13: map_int_g
def map_int_g(self, expr, name_hint=None):
    """Compile a layer-potential integral into a
    :class:`LayerPotentialInstruction`, batching every operator that
    shares this expression's group features into one instruction.
    """
    try:
        return self.expr_to_var[expr]
    except KeyError:
        # make sure operator assignments stand alone and don't get muddled
        # up in vector arithmetic
        density_var = self.assign_to_new_var(self.rec(expr.density))

        group = self.group_to_operators[self.op_group_features(expr)]
        names = [self.get_var_name() for op in group]

        # deduplicate kernels across the group while recording the slot of
        # each operator's kernel
        kernel_to_index = {}
        kernels = []
        for op in group:
            if op.kernel not in kernel_to_index:
                kernel_to_index[op.kernel] = len(kernels)
                kernels.append(op.kernel)

        from pytools import single_valued
        from sumpy.kernel import AxisTargetDerivativeRemover
        atdr = AxisTargetDerivativeRemover()
        # once target derivatives are stripped, every kernel in the group
        # must reduce to the same base kernel
        base_kernel = single_valued(
                atdr(kernel) for kernel in kernels)

        for op in group:
            assert op.qbx_forced_limit in [-1, 0, 1]

        kernel_arguments = dict(
                (arg_name, self.rec(arg_val))
                for arg_name, arg_val in six.iteritems(expr.kernel_arguments))

        outputs = [
                LayerPotentialOutput(
                    name=name,
                    kernel_index=kernel_to_index[op.kernel],
                    target_name=op.target,
                    qbx_forced_limit=op.qbx_forced_limit,
                    )
                for name, op in zip(names, group)
                ]

        self.code.append(
                LayerPotentialInstruction(
                    outputs=outputs,
                    kernels=tuple(kernels),
                    kernel_arguments=kernel_arguments,
                    base_kernel=base_kernel,
                    density=density_var,
                    source=expr.source,
                    priority=max(getattr(op, "priority", 0) for op in group),
                    dep_mapper_factory=self.dep_mapper_factory))

        from pymbolic.primitives import Variable
        # register a result variable for every operator in the group, so
        # later lookups of any of them hit the cache
        for name, group_expr in zip(names, group):
            self.expr_to_var[group_expr] = Variable(name)

        return self.expr_to_var[expr]
开发者ID:sj90101,项目名称:pytential,代码行数:57,代码来源:compiler.py
示例14: __call__
def __call__(self, *args):
    """Apply the scalar kernel entry-wise across object arrays.

    Each argument is a ``(factor, object_array)`` pair; all object arrays
    must share one shape.  Returns an object array of that shape whose
    entry at index *i* is ``scalar_kernel`` applied to the per-entry
    ``(factor, array[i])`` pairs.
    """
    from pytools import indices_in_shape, single_valued

    common_shape = single_valued(ary.shape for fac, ary in args)

    result = numpy.zeros(common_shape, dtype=object)
    for idx in indices_in_shape(common_shape):
        result[idx] = self.scalar_kernel(
                *[(fac, ary[idx]) for fac, ary in args])

    return result
开发者ID:felipeh,项目名称:hedge,代码行数:11,代码来源:vector_primitives.py
示例15: get_or_register_dtype
def get_or_register_dtype(self, c_names, dtype=None):
    """Get or register a :class:`numpy.dtype` associated with the C type names
    in the string list *c_names*.

    If *dtype* is `None`, no registration is performed; the names must
    already be registered (to a single dtype), and that dtype is returned.
    If not, :exc:`TypeNameNotKnown` is raised.

    If *dtype* is not `None`, registration is attempted.  If the *c_names*
    are already registered to an identical :class:`numpy.dtype`, the
    previously registered dtype object is returned.  Unknown names are
    registered; a name already bound to a *different* dtype raises an
    error, in which case the registry may be left partially updated.

    .. versionadded:: 2012.2
    """
    if isinstance(c_names, str):
        c_names = [c_names]

    if dtype is None:
        # lookup-only mode: every name must resolve to the same dtype
        from pytools import single_valued
        return single_valued(self.name_to_dtype[name] for name in c_names)

    dtype = np.dtype(dtype)

    # if an identical dtype was registered before, swap in the exact
    # previously registered dtype object
    existed = dtype in self.dtype_to_name
    if existed:
        registered = self.name_to_dtype[self.dtype_to_name[dtype]]
        assert registered == dtype
        dtype = registered

    for nm in c_names:
        if nm in self.name_to_dtype:
            if self.name_to_dtype[nm] != dtype:
                raise RuntimeError("name '%s' already registered to "
                        "different dtype" % nm)
        else:
            self.name_to_dtype[nm] = dtype

    if not existed:
        self.dtype_to_name[dtype] = c_names[0]
        # also allow reverse lookup by the dtype's string form
        if not str(dtype) in self.dtype_to_name:
            self.dtype_to_name[str(dtype)] = c_names[0]

    return dtype
开发者ID:deepnirmal,项目名称:py-cuda,代码行数:53,代码来源:dtypes.py
示例16: find_index_rank
def find_index_rank(self, name):
    """Return the number of indices with which *name* is subscripted
    across all instructions, or 0 if it is never subscripted.

    All subscripts of *name* must agree on their rank.
    """
    irf = IndexRankFinder(name)

    # walk every instruction's expressions, letting the finder record the
    # rank of each subscript of *name*
    for insn in self.instructions:
        insn.with_transformed_expressions(
                lambda expr: irf(self.submap(expr)))

    if irf.index_ranks:
        from pytools import single_valued
        return single_valued(irf.index_ranks)

    return 0
开发者ID:rckirby,项目名称:loopy,代码行数:12,代码来源:creation.py
示例17: multi_take
def multi_take(arrays, indices, out=None, queue=None):
    """Gather the elements at *indices* from each array in *arrays*.

    :arg arrays: a sequence of device arrays, all of one dtype.
    :arg indices: a 1D device index array.
    :arg out: optional list of pre-allocated outputs, one per input array.
    :arg queue: command queue; defaults to the queue of *indices*.
    :returns: the list of output arrays (*out* itself if it was given).
    """
    if not len(arrays):
        return []

    assert len(indices.shape) == 1

    from pytools import single_valued
    # all inputs must share a single dtype
    a_dtype = single_valued(a.dtype for a in arrays)
    # BUG FIX: this previously read ``arrays[0].dtype``, making the
    # allocator a numpy dtype object (cf. multi_put, which correctly uses
    # ``.allocator``) and breaking allocation of the output arrays.
    a_allocator = arrays[0].allocator
    context = indices.context
    queue = queue or indices.queue

    vec_count = len(arrays)

    if out is None:
        out = [Array(context, queue, indices.shape, a_dtype,
            allocator=a_allocator)
            for i in range(vec_count)]
    else:
        if len(out) != len(arrays):
            raise ValueError("out and arrays must have the same length")

    # process at most 10 arrays per kernel launch; the take kernel is
    # specialized for the exact chunk size
    chunk_size = _builtin_min(vec_count, 10)

    def make_func_for_chunk_size(chunk_size):
        knl = elementwise.get_take_kernel(
                indices.context, a_dtype, indices.dtype,
                vec_count=chunk_size)
        knl.set_block_shape(*indices._block)
        return knl

    knl = make_func_for_chunk_size(chunk_size)

    for start_i in range(0, len(arrays), chunk_size):
        chunk_slice = slice(start_i, start_i+chunk_size)

        # the last chunk may be smaller -- respecialize for the exact count
        if start_i + chunk_size > vec_count:
            knl = make_func_for_chunk_size(vec_count-start_i)

        gs, ls = indices.get_sizes(queue,
                knl.get_work_group_info(
                    cl.kernel_work_group_info.WORK_GROUP_SIZE,
                    queue.device))

        knl(queue, gs, ls,
                indices.data,
                *([o.data for o in out[chunk_slice]]
                    + [i.data for i in arrays[chunk_slice]]
                    + [indices.size]))

    return out
开发者ID:Gormse,项目名称:MacMiner,代码行数:51,代码来源:array.py
示例18: gpu_diffmats
def gpu_diffmats(self, diff_op_cls, elgroup):
    """Build the on-device differentiation-matrix data for *elgroup*.

    Each per-axis matrix is stacked once per microblock element, sliced
    into segments of ``plan.segment_size`` rows, and the segments are
    padded and joined into a single device buffer.
    """
    discr = self.discr
    given = self.plan.given

    columns = given.dofs_per_el()*discr.dimensions
    additional_columns = 0
    # avoid smem fetch bank conflicts by ensuring odd col count
    if columns % 2 == 0:
        columns += 1
        additional_columns += 1

    block_floats = given.devdata.align_dtype(
            columns*self.plan.segment_size, given.float_size())

    # stack each axis matrix once per element in a microblock
    vstacked_matrices = [
        numpy.vstack(given.microblock.elements*(m,))
        for m in diff_op_cls.matrices(elgroup)
        ]

    segments = []

    from pytools import single_valued
    for segment_start in range(0, given.microblock.elements*given.dofs_per_el(), self.plan.segment_size):
        matrices = [
            m[segment_start:segment_start+self.plan.segment_size]
            for m in vstacked_matrices]

        # zero padding for the extra bank-conflict-avoidance column(s)
        matrices.append(
            numpy.zeros((single_valued(m.shape[0] for m in matrices),
                additional_columns))
            )

        diffmats = numpy.asarray(
                numpy.hstack(matrices),
                dtype=given.float_type,
                order="C")
        # NOTE(review): `buffer` is Python 2-only; this code predates
        # Python 3 (memoryview/bytes would be needed there) -- confirm
        # the targeted interpreter before porting
        segments.append(buffer(diffmats))

    from hedge.backends.cuda.tools import pad_and_join

    from pytools import Record
    class GPUDifferentiationMatrices(Record):
        pass

    return GPUDifferentiationMatrices(
            device_memory=cuda.to_device(
                pad_and_join(segments, block_floats*given.float_size())),
            block_floats=block_floats,
            matrix_columns=columns)
开发者ID:allansnielsen,项目名称:hedge,代码行数:49,代码来源:diff_shared_segmat.py
示例19: get_or_register_dtype
def get_or_register_dtype(c_names, dtype=None):
    """Get or register a :class:`numpy.dtype` associated with the C type names
    in the string list *c_names*.

    If *dtype* is `None`, no registration is performed; the names must
    already be registered (to a single dtype), and that dtype is returned.
    If not, :exc:`TypeNameNotKnown` is raised.

    If *dtype* is not `None`, registration is attempted: names already
    registered to an identical :class:`numpy.dtype` yield the previously
    registered dtype object; unknown names are registered; a name bound to
    a different dtype raises an error.

    .. versionadded:: 2012.2
    """
    if isinstance(c_names, str):
        c_names = [c_names]

    if dtype is None:
        # lookup-only mode: every name must resolve to the same dtype
        from pytools import single_valued
        return single_valued(NAME_TO_DTYPE[name] for name in c_names)

    dtype = np.dtype(dtype)

    # if an identical dtype was registered before, swap in the exact
    # previously registered dtype object
    existed = dtype in DTYPE_TO_NAME
    if existed:
        canonical = NAME_TO_DTYPE[DTYPE_TO_NAME[dtype]]
        assert canonical == dtype
        dtype = canonical

    for nm in c_names:
        if nm in NAME_TO_DTYPE:
            if NAME_TO_DTYPE[nm] != dtype:
                raise RuntimeError("name '%s' already registered to different dtype" % nm)
        else:
            NAME_TO_DTYPE[nm] = dtype

    if not existed:
        DTYPE_TO_NAME[dtype] = c_names[0]
        # also allow reverse lookup by the dtype's string form
        if not str(dtype) in DTYPE_TO_NAME:
            DTYPE_TO_NAME[str(dtype)] = c_names[0]

    return dtype
开发者ID:slachowsky,项目名称:compyte,代码行数:49,代码来源:dtypes.py
示例20: combine
def combine(dtypes):
    """Combine *dtypes* into the single result type of an operation
    involving values of all of them.

    :arg dtypes: an iterable of :class:`loopy.types.LoopyType` instances.
    :returns: a :class:`loopy.types.NumpyType` when all inputs are numpy
        types; otherwise the (single) common non-numpy type.
    :raises TypeInferenceFailure: if no common type can be determined.
    """
    # dtypes may just be a generator expr
    dtypes = list(dtypes)

    from loopy.types import LoopyType, NumpyType
    assert all(isinstance(dtype, LoopyType) for dtype in dtypes)

    if not all(isinstance(dtype, NumpyType) for dtype in dtypes):
        # non-numpy types only combine if they are all the same type
        from pytools import is_single_valued, single_valued
        if not is_single_valued(dtypes):
            raise TypeInferenceFailure(
                    "Nothing known about operations between '%s'"
                    % ", ".join(str(dt) for dt in dtypes))

        return single_valued(dtypes)

    dtypes = [dtype.dtype for dtype in dtypes]

    # pairwise-fold the list down to one promoted numpy dtype
    result = dtypes.pop()
    while dtypes:
        other = dtypes.pop()

        if result.fields is None and other.fields is None:
            if (result, other) in [
                    (np.int32, np.float32), (np.float32, np.int32)]:
                # numpy makes this a double. I disagree.
                result = np.dtype(np.float32)
            else:
                # defer to numpy's promotion rules via a zero-size operation
                result = (
                        np.empty(0, dtype=result)
                        + np.empty(0, dtype=other)
                        ).dtype
        elif result.fields is None and other.fields is not None:
            # assume the non-native type takes over
            # (This is used for vector types.)
            result = other
        elif result.fields is not None and other.fields is None:
            # assume the non-native type takes over
            # (This is used for vector types.)
            pass
        else:
            if result is not other:
                raise TypeInferenceFailure(
                        "nothing known about result of operation on "
                        "'%s' and '%s'" % (result, other))

    return NumpyType(result)
开发者ID:cmsquared,项目名称:loopy,代码行数:48,代码来源:expression.py
注:本文中的pytools.single_valued函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论