This article collects typical usage examples of the numpy.can_cast function in Python. If you are wondering what exactly can_cast does, how to call it, or what real uses of it look like, the hand-picked code examples below may help.
The following shows 20 code examples of the can_cast function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
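Before turning to the project code, here is a minimal standalone sketch (not taken from any of the projects below) of what np.can_cast reports for a few dtype pairs under the different casting rules:

import numpy as np

np.can_cast(np.int32, np.int64)                   # True  -- widening is 'safe' (the default rule)
np.can_cast(np.float64, np.float32, 'safe')       # False -- would lose precision
np.can_cast(np.float64, np.float32, 'same_kind')  # True  -- allowed within the float kind
np.can_cast(np.int64, np.uint8, 'unsafe')         # True  -- 'unsafe' permits any conversion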
Example 1: validate_datatype

def validate_datatype(validator, datatype, instance, schema):
    if isinstance(instance, list):
        array = inline_data_asarray(instance)
        in_datatype, _ = numpy_dtype_to_asdf_datatype(array.dtype)
    elif isinstance(instance, dict):
        if 'datatype' in instance:
            in_datatype = instance['datatype']
        elif 'data' in instance:
            array = inline_data_asarray(instance['data'])
            in_datatype, _ = numpy_dtype_to_asdf_datatype(array.dtype)
        else:
            raise ValidationError("Not an array")
    elif isinstance(instance, (np.ndarray, NDArrayType)):
        in_datatype, _ = numpy_dtype_to_asdf_datatype(instance.dtype)
    else:
        raise ValidationError("Not an array")

    if datatype == in_datatype:
        return

    if schema.get('exact_datatype', False):
        yield ValidationError(
            "Expected datatype '{0}', got '{1}'".format(
                datatype, in_datatype))

    np_datatype = asdf_datatype_to_numpy_dtype(datatype)
    np_in_datatype = asdf_datatype_to_numpy_dtype(in_datatype)

    if not np_datatype.fields:
        if np_in_datatype.fields:
            yield ValidationError(
                "Expected scalar datatype '{0}', got '{1}'".format(
                    datatype, in_datatype))

        if not np.can_cast(np_in_datatype, np_datatype, 'safe'):
            yield ValidationError(
                "Can not safely cast from '{0}' to '{1}' ".format(
                    in_datatype, datatype))
    else:
        if not np_in_datatype.fields:
            yield ValidationError(
                "Expected structured datatype '{0}', got '{1}'".format(
                    datatype, in_datatype))

        if len(np_in_datatype.fields) != len(np_datatype.fields):
            yield ValidationError(
                "Mismatch in number of columns: "
                "Expected {0}, got {1}".format(
                    len(datatype), len(in_datatype)))

        for i in range(len(np_datatype.fields)):
            in_type = np_in_datatype[i]
            out_type = np_datatype[i]
            if not np.can_cast(in_type, out_type, 'safe'):
                yield ValidationError(
                    "Can not safely cast to expected datatype: "
                    "Expected {0}, got {1}".format(
                        numpy_dtype_to_asdf_datatype(out_type)[0],
                        numpy_dtype_to_asdf_datatype(in_type)[0]))

Developer ID: vmarkovtsev, Project: asdf, Lines of code: 60, Source file: ndarray.py
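The validator above relies on the 'safe' rule to decide whether stored data can be widened to the schema's datatype. A minimal illustration of that rule, independent of asdf:

import numpy as np

np.can_cast(np.int16, np.int32, 'safe')    # True  -- widening an integer is safe
np.can_cast(np.float64, np.int32, 'safe')  # False -- truncating a float is not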
Example 2: sum

def sum(self, axis=None):
    """Sum the matrix over the given axis. If the axis is None, sum
    over both rows and columns, returning a scalar.
    """
    # The spmatrix base class already does axis=0 and axis=1 efficiently
    # so we only do the case axis=None here
    if axis is None:
        return self.data.sum()
    elif (not hasattr(self, 'blocksize') and
          axis in self._swap(((1, -1), (0, 2)))[0]):
        # faster than multiplication for large minor axis in CSC/CSR
        # Mimic numpy's casting.
        if np.issubdtype(self.dtype, np.float_):
            res_dtype = np.float_
        elif (self.dtype.kind == 'u' and
              np.can_cast(self.dtype, np.uint)):
            res_dtype = np.uint
        elif np.can_cast(self.dtype, np.int_):
            res_dtype = np.int_
        else:
            res_dtype = self.dtype

        ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)

        major_index, value = self._minor_reduce(np.add)
        ret[major_index] = value
        ret = np.asmatrix(ret)

        if axis % 2 == 1:
            ret = ret.T
        return ret
    else:
        return spmatrix.sum(self, axis)

Developer ID: AldenJurling, Project: scipy, Lines of code: 31, Source file: compressed.py
Example 3: get_sum_dtype

def get_sum_dtype(dtype):
    """Mimic numpy's casting for np.sum"""
    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
        return np.uint
    if np.can_cast(dtype, np.int_):
        return np.int_
    return dtype

Developer ID: ElDeveloper, Project: scipy, Lines of code: 7, Source file: sputils.py
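For instance, on a typical 64-bit Linux/macOS build where np.int_ is int64 (an assumption; on Windows it is narrower), the helper promotes small integer dtypes to the platform integer and leaves floats untouched:

import numpy as np

get_sum_dtype(np.dtype(np.int8))     # numpy.int64 (i.e. np.int_)
get_sum_dtype(np.dtype(np.uint16))   # numpy.uint64 (i.e. np.uint)
get_sum_dtype(np.dtype(np.float32))  # dtype('float32') -- cannot be safely cast to np.int_, kept as-is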
Example 4: sum

def sum(self, axis=None):
    """Sum the matrix over the given axis. If the axis is None, sum
    over both rows and columns, returning a scalar.
    """
    # We use multiplication by an array of ones to achieve this.
    # For some sparse matrix formats more efficient methods are
    # possible -- these should override this function.
    m, n = self.shape

    # Mimic numpy's casting.
    if np.issubdtype(self.dtype, np.float_):
        res_dtype = np.float_
    elif (self.dtype.kind == 'u' and
          np.can_cast(self.dtype, np.uint)):
        res_dtype = np.uint
    elif np.can_cast(self.dtype, np.int_):
        res_dtype = np.int_
    else:
        res_dtype = self.dtype

    if axis is None:
        # sum over rows and columns
        return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum()

    if axis < 0:
        axis += 2
    if axis == 0:
        # sum over columns
        return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self
    elif axis == 1:
        # sum over rows
        return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))
    else:
        raise ValueError("axis out of bounds")

Developer ID: AldenJurling, Project: scipy, Lines of code: 34, Source file: base.py
Example 5: transform_scalars

def transform_scalars(dataset, constant=0):
    """Add a constant to the data set"""
    from tomviz import utils
    import numpy as np

    scalars = utils.get_scalars(dataset)
    if scalars is None:
        raise RuntimeError("No scalars found!")

    # Try to be a little smart so that we don't always just produce a
    # double-precision output
    newMin = np.min(scalars) + constant
    newMax = np.max(scalars) + constant

    if (constant).is_integer() and newMin.is_integer() and newMax.is_integer():
        # Let ints be ints!
        constant = int(constant)
        newMin = int(newMin)
        newMax = int(newMax)

    for dtype in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32,
                  np.uint64, np.int64, np.float32, np.float64]:
        if np.can_cast(newMin, dtype) and np.can_cast(newMax, dtype):
            constant = np.array([constant], dtype=dtype)
            break

    # numpy should cast to an appropriate output type to avoid overflow
    result = scalars + constant
    utils.set_scalars(dataset, result)

Developer ID: cjh1, Project: tomviz, Lines of code: 29, Source file: AddConstant.py
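The dtype search above relies on value-based casting: when np.can_cast is given a Python scalar instead of a dtype, NumPy versions before 2.0 check whether that particular value fits into the target type (NumPy 2.0 removed this and rejects Python scalars here). A small illustration, assuming NumPy < 2.0:

import numpy as np

np.can_cast(100, np.uint8)  # True  -- 100 fits in a uint8
np.can_cast(300, np.uint8)  # False -- 300 would overflow
np.can_cast(-1, np.uint8)   # False -- a negative value cannot be unsigned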
Example 6: test_can_cast_record

def test_can_cast_record(self):
    import numpy as np
    rec1 = np.dtype([('x', int), ('y', float)])
    rec2 = np.dtype([('x', float), ('y', float)])
    rec3 = np.dtype([('y', np.float64), ('x', float)])
    assert not np.can_cast(rec1, rec2, 'equiv')
    assert np.can_cast(rec2, rec3, 'equiv')
    assert np.can_cast(rec1, rec2)

Developer ID: pypyjs, Project: pypy, Lines of code: 8, Source file: test_casting.py
Example 7: resolve

def resolve(self, identifier, additional_namespace=None, strip_units=False):
    '''
    The additional_namespace (e.g. the local/global namespace) will only
    be used if the namespace does not contain any user-defined namespace.
    '''
    # We save tuples of (namespace description, referred object) to
    # give meaningful warnings in case of duplicate definitions
    matches = []

    if self.is_explicit or additional_namespace is None:
        namespaces = self.namespaces
    else:
        namespaces = OrderedDict(self.namespaces)
        # Add the additional namespace in the end
        description, namespace = additional_namespace
        namespaces[description] = namespace

    for description, namespace in namespaces.iteritems():
        if identifier in namespace:
            matches.append((description, namespace[identifier]))

    if len(matches) == 0:
        # No match at all
        raise KeyError(('The identifier "%s" could not be resolved.') %
                       (identifier))
    elif len(matches) > 1:
        # Possibly, all matches refer to the same object
        first_obj = matches[0][1]
        if not all([(m[1] is first_obj) or _same_function(m[1], first_obj)
                    for m in matches]):
            _conflict_warning(('The name "%s" refers to different objects '
                               'in different namespaces used for resolving. '
                               'Will use the object from the %s namespace '
                               'with the value %r') %
                              (identifier, matches[0][0],
                               first_obj), matches[1:])

    # use the first match (according to resolution order)
    resolved = matches[0][1]

    # Remove units
    if strip_units and isinstance(resolved, Quantity):
        if resolved.ndim == 0:
            resolved = float(resolved)
        else:
            resolved = np.asarray(resolved)

    # Use standard Python types if possible
    if not isinstance(resolved, np.ndarray) and hasattr(resolved, 'dtype'):
        numpy_type = resolved.dtype
        if np.can_cast(numpy_type, np.int_):
            resolved = int(resolved)
        elif np.can_cast(numpy_type, np.float_):
            resolved = float(resolved)
        elif np.can_cast(numpy_type, np.complex_):
            resolved = complex(resolved)

    return resolved

Developer ID: yayyme, Project: brian2, Lines of code: 58, Source file: namespace.py
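The final block of resolve() uses can_cast to decide which built-in Python type can hold a NumPy scalar without loss. A standalone sketch of those checks (written with np.float64 in place of the np.float_ alias, which recent NumPy removed):

import numpy as np

x = np.int16(7)
np.can_cast(x.dtype, np.int_)     # True  -> resolved becomes int(x)
y = np.float32(2.5)
np.can_cast(y.dtype, np.int_)     # False -- a float does not fit an integer safely
np.can_cast(y.dtype, np.float64)  # True  -> resolved becomes float(y)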
Example 8: __new__

def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
            subok=False, ndmin=0):

    if unit is not None:
        # convert unit first, to avoid multiple string->unit conversions
        unit = Unit(unit)

    # optimize speed for Quantity with no dtype given, copy=False
    if isinstance(value, Quantity):
        if unit is not None and unit is not value.unit:
            value = value.to(unit)
            # the above already makes a copy (with float dtype)
            copy = False

        if not subok and type(value) is not cls:
            value = value.view(cls)

        if dtype is None:
            if not copy:
                return value

            if not np.can_cast(np.float32, value.dtype):
                dtype = np.float

        return np.array(value, dtype=dtype, copy=copy, order=order,
                        subok=True, ndmin=ndmin)

    # Maybe list/tuple of Quantity? short-circuit array for speed
    if(not isinstance(value, np.ndarray) and isiterable(value) and
       all(isinstance(v, Quantity) for v in value)):
        if unit is None:
            unit = value[0].unit
        value = [q.to(unit).value for q in value]
        copy = False  # copy already made
    else:
        if unit is None:
            unit = dimensionless_unscaled

    value = np.array(value, dtype=dtype, copy=copy, order=order,
                     subok=False, ndmin=ndmin)

    # check that array contains numbers or long int objects
    if (value.dtype.kind in 'OSU' and
            not (value.dtype.kind == 'O' and
                 isinstance(value.item(() if value.ndim == 0 else 0),
                            numbers.Number))):
        raise TypeError("The value must be a valid Python or "
                        "Numpy numeric type.")

    # by default, cast any integer, boolean, etc., to float
    if dtype is None and not np.can_cast(np.float32, value.dtype):
        value = value.astype(np.float)

    value = value.view(cls)
    value._unit = unit
    return value

Developer ID: CadenArmstrong, Project: astropy, Lines of code: 58, Source file: quantity.py
Example 9: can_cast1

def can_cast1(args, ty_ins):
    for i in six.moves.range(nin):
        if args[i].const is None:
            if not numpy.can_cast(args[i].ty, ty_ins[i]):
                return False
        else:
            if not numpy.can_cast(args[i].const, ty_ins[i]):
                return False
    return True

Developer ID: hitottiez, Project: chainer, Lines of code: 9, Source file: fusion.py
Example 10: gentle_asarray

def gentle_asarray(a, dtype):
    """
    Performs an asarray that doesn't cause a copy if the byteorder is
    different. It also ignores column name differences -- the
    resulting array will have the column names from the given dtype.
    """
    out_dtype = np.dtype(dtype)
    if isinstance(a, np.ndarray):
        in_dtype = a.dtype
        # Non-table array
        if in_dtype.fields is None and out_dtype.fields is None:
            if np.can_cast(in_dtype, out_dtype, 'equiv'):
                return a
            else:
                return np.asanyarray(a, dtype=out_dtype)
        elif in_dtype.fields is not None and out_dtype.fields is not None:
            if in_dtype == out_dtype:
                return a
            in_names = {n.lower() for n in in_dtype.names}
            out_names = {n.lower() for n in out_dtype.names}
            if in_names == out_names:
                # Change the dtype name to match the fits record names
                # as the mismatch causes case insensitive access to fail
                out_dtype.names = in_dtype.names
            else:
                raise ValueError(
                    "Column names don't match schema. "
                    "Schema has {0}. Data has {1}".format(
                        str(out_names.difference(in_names)),
                        str(in_names.difference(out_names))))

            new_dtype = []
            for i in range(len(out_dtype.fields)):
                in_type = in_dtype[i]
                out_type = out_dtype[i]
                if in_type.subdtype is None:
                    type_str = in_type.str
                else:
                    type_str = in_type.subdtype[0].str
                if np.can_cast(in_type, out_type, 'equiv'):
                    new_dtype.append(
                        (out_dtype.names[i],
                         type_str,
                         in_type.shape))
                else:
                    return np.asanyarray(a, dtype=out_dtype)
            return a.view(dtype=np.dtype(new_dtype))
        else:
            return np.asanyarray(a, dtype=out_dtype)
    else:
        try:
            a = np.asarray(a, dtype=out_dtype)
        except Exception:
            raise ValueError("Can't convert {0!s} to ndarray".format(type(a)))
        return a

Developer ID: jaytmiller, Project: jwst, Lines of code: 55, Source file: util.py
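gentle_asarray leans on the 'equiv' rule, which only tolerates byte-order changes; anything that actually alters the stored values falls through to a real conversion via np.asanyarray. For example:

import numpy as np

np.can_cast('<i4', '>i4', 'equiv')  # True  -- same type, only the byte order differs
np.can_cast('i4', 'i8', 'equiv')    # False -- widening is not an equivalent cast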
Example 11: analyze

def analyze(aDataObject, bDataObject,
            epsilonValue=0.0, epsilonPercent=None):
    """
    analyze the differences between the two data sets
    updates the two data objects with additional masks
    and returns data object containing diff data and masks
    """
    shape = aDataObject.data.shape
    assert(bDataObject.data.shape == shape)
    assert(np.can_cast(aDataObject.data.dtype, bDataObject.data.dtype) or
           np.can_cast(bDataObject.data.dtype, aDataObject.data.dtype))

    # do some basic analysis on the individual data sets
    aDataObject.self_analysis()
    bDataObject.self_analysis()

    # where is the shared valid data?
    valid_in_both = aDataObject.masks.valid_mask & bDataObject.masks.valid_mask
    ignore_in_both = aDataObject.masks.ignore_mask | bDataObject.masks.ignore_mask

    # get our shared data type and fill value
    sharedType, fill_data_value = DiffInfoObject._get_shared_type_and_fill_value(aDataObject.data,
                                                                                 bDataObject.data,
                                                                                 aDataObject.select_fill_value(),
                                                                                 bDataObject.select_fill_value())

    # we can't continue if we don't have a fill value
    assert(fill_data_value is not None)

    # construct our diff'ed data set
    raw_diff = np.zeros(shape, dtype=sharedType)
    raw_diff[~valid_in_both] = fill_data_value  # throw away invalid data

    # compute difference, using shared type in computation
    raw_diff[valid_in_both] = bDataObject.data[valid_in_both].astype(sharedType) - \
                              aDataObject.data[valid_in_both].astype(sharedType)

    # the valid data which is too different between the two sets according to the given epsilon
    outside_epsilon_mask = np.zeros(shape, dtype=np.bool)
    if (epsilonValue is not None):
        outside_epsilon_mask |= (abs(raw_diff) > epsilonValue) & valid_in_both
    if (epsilonPercent is not None):
        outside_epsilon_mask |= (abs(raw_diff) > abs(aDataObject.data * (float(epsilonPercent) / 100.0))) & valid_in_both

    # mismatch points = mismatched nans, mismatched missing-values, differences that are too large
    mismatch_pt_mask = ((aDataObject.masks.non_finite_mask ^ bDataObject.masks.non_finite_mask) |
                        (aDataObject.masks.missing_mask ^ bDataObject.masks.missing_mask) |
                        outside_epsilon_mask)

    # make our diff data object
    diff_data_object = DataObject(raw_diff, fillValue=fill_data_value)
    diff_data_object.masks = DiffMaskSetObject(ignore_in_both, valid_in_both,
                                               mismatch_pt_mask, outside_epsilon_mask)

    return diff_data_object

Developer ID: adesmet-ssec, Project: uwglance, Lines of code: 54, Source file: data.py
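The assertion at the top of analyze() only requires that one of the two dtypes can safely represent the other, in either direction. A reduced form of that check:

import numpy as np

a, b = np.dtype(np.int16), np.dtype(np.float32)
np.can_cast(a, b) or np.can_cast(b, a)  # True -- every int16 value is exactly representable as float32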
Example 12: gentle_asarray

def gentle_asarray(a, dtype):
    """
    Performs an asarray that doesn't cause a copy if the byteorder is
    different. It also ignores column name differences -- the
    resulting array will have the column names from the given dtype.
    """
    out_dtype = np.dtype(dtype)
    if isinstance(a, np.ndarray):
        in_dtype = a.dtype
        # Non-table array
        if in_dtype.fields is None and out_dtype.fields is None:
            if np.can_cast(in_dtype, out_dtype, 'equiv'):
                return a
            else:
                return np.asanyarray(a, dtype=out_dtype)
        elif in_dtype.fields is not None and out_dtype.fields is not None:
            if in_dtype == out_dtype:
                return a
            if len(in_dtype) != len(out_dtype):
                raise ValueError(
                    "Wrong number of columns. Expected {0}, got {1}".format(
                        len(out_dtype), len(in_dtype)))

            new_dtype = []
            # Change the dtype name to match the fits record names
            # as the mismatch causes case insensitive access to fail
            if hasattr(in_dtype, 'names') and hasattr(out_dtype, 'names'):
                out_dtype.names = in_dtype.names
            for i in range(len(out_dtype.fields)):
                in_type = in_dtype[i]
                out_type = out_dtype[i]
                if in_type.subdtype is None:
                    type_str = in_type.str
                else:
                    type_str = in_type.subdtype[0].str
                if np.can_cast(in_type, out_type, 'equiv'):
                    new_dtype.append(
                        (out_dtype.names[i],
                         type_str,
                         in_type.shape))
                else:
                    return np.asanyarray(a, dtype=out_dtype)
            return a.view(dtype=np.dtype(new_dtype))
        else:
            return np.asanyarray(a, dtype=out_dtype)
    else:
        try:
            a = np.asarray(a, dtype=out_dtype)
        except:
            raise ValueError("Can't convert {0!s} to ndarray".format(type(a)))
        return a

Developer ID: sosey, Project: jwst, Lines of code: 50, Source file: util.py
Example 13: upcast

def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------
    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.int8'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>
    """
    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    upcast = np.find_common_type(args, [])

    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError('no supported conversion for types: %s' % args)

Developer ID: 87, Project: scipy, Lines of code: 32, Source file: sputils.py
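A self-contained sketch of the same idea: find the first dtype from a supported list that can safely hold the common type of all inputs. The supported list below is hypothetical (in scipy, supported_dtypes and _upcast_memo are module globals), and np.result_type stands in for the now-deprecated np.find_common_type:

import numpy as np

supported = [np.int8, np.int32, np.int64, np.float32, np.float64, np.complex128]

def upcast_sketch(*args):
    common = np.result_type(*args)    # common NumPy type of all the inputs
    for t in supported:
        if np.can_cast(common, t):    # first supported dtype that can hold it safely
            return t
    raise TypeError('no supported conversion for types: %r' % (args,))

upcast_sketch('int32')             # numpy.int32
upcast_sketch('int32', 'float32')  # numpy.float64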
Example 14: __init__

def __init__(self, element_strategy, shape, dtype, fill, unique):
    self.shape = tuple(shape)
    self.fill = fill
    check_argument(shape,
                   u'Array shape must have at least one dimension, '
                   u'provided shape was {}', shape)
    check_argument(all(isinstance(s, int) for s in shape),
                   u'Array shape must be integer in each dimension, '
                   u'provided shape was {}', shape)
    self.array_size = int(np.prod(shape))
    self.dtype = dtype
    self.element_strategy = element_strategy
    self.unique = unique

    # Used by self.insert_element to check that the value can be stored
    # in the array without e.g. overflowing. See issue #1385.
    if dtype.kind in (u'i', u'u'):
        self.check_cast = lambda x: np.can_cast(x, self.dtype, 'safe')
    elif dtype.kind == u'f' and dtype.itemsize == 2:
        max_f2 = (2. - 2 ** -10) * 2 ** 15
        self.check_cast = lambda x: \
            (not np.isfinite(x)) or (-max_f2 <= x <= max_f2)
    elif dtype.kind == u'f' and dtype.itemsize == 4:
        max_f4 = (2. - 2 ** -23) * 2 ** 127
        self.check_cast = lambda x: \
            (not np.isfinite(x)) or (-max_f4 <= x <= max_f4)
    else:
        self.check_cast = lambda x: True

Developer ID: Wilfred, Project: hypothesis-python, Lines of code: 28, Source file: numpy.py
Example 15: set_data

def set_data(self, A):
    """
    Set the image array

    ACCEPTS: numpy/PIL Image A
    """
    # check if data is PIL Image without importing Image
    if hasattr(A, 'getpixel'):
        self._A = pil_to_array(A)
    else:
        self._A = cbook.safe_masked_invalid(A)

    if (self._A.dtype != np.uint8 and
            not np.can_cast(self._A.dtype, np.float)):
        raise TypeError("Image data can not convert to float")

    if (self._A.ndim not in (2, 3) or
            (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
        raise TypeError("Invalid dimensions for image data")

    self._imcache = None
    self._rgbacache = None
    self._oldxslice = None
    self._oldyslice = None
    self.stale = True

Developer ID: giltis, Project: matplotlib, Lines of code: 25, Source file: image.py
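set_data accepts uint8 data directly and otherwise requires that the array can be safely cast to float (np.float here is the old alias for Python's float, removed in recent NumPy). The check boils down to:

import numpy as np

np.can_cast(np.int16, np.float64)       # True  -- integer image data can become float
np.can_cast(np.complex128, np.float64)  # False -- complex data is rejected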
Example 16: upcast

def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------
    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.int8'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>
    """
    sample = np.array([0], dtype=args[0])
    for t in args[1:]:
        sample = sample + np.array([0], dtype=t)

    upcast = sample.dtype

    for t in supported_dtypes:
        if np.can_cast(sample.dtype, t):
            return t

    raise TypeError, 'no supported conversion for types: %s' % args

Developer ID: AndreI11, Project: SatStressGui, Lines of code: 30, Source file: sputils.py
Example 17: read_raster

def read_raster(raster, masked=True, driver=None):
    src = rasterio.open(raster, driver=driver)
    if src.count > 1:
        src.close()
        raise NotImplementedError('Cannot load a multiband layer')

    if src.crs.is_valid:
        proj = parse_projection(src.crs)
    else:
        proj = None

    if masked:
        _raster = src.read(1, masked=masked)
        # return _raster
        if isinstance(src.transform, Affine):
            transform = src.transform
        else:
            transform = src.affine  # for compatibility with rasterio 0.36
        rgrid = RectifiedGrid(_raster,
                              proj,
                              transform,
                              mask=_raster.mask)
    else:
        rgrid = RectifiedGrid(src.read(1),
                              proj,
                              transform,
                              mask=np.ma.nomask)
    src.close()

    # check and fix fill_value dtype
    if not np.can_cast(rgrid.fill_value, rgrid.dtype, casting='safe'):
        fill_value = guess_fill_value(rgrid)
        rgrid.set_fill_value(fill_value)
        logger.warning("read_raster: the fill_value has been changed to {}".format(fill_value))

    return rgrid

Developer ID: CNR-ISMAR, Project: rectifiedgrid, Lines of code: 33, Source file: core.py
Example 18: imageArrayToStruct

def imageArrayToStruct(imgArray, sparkMode=None):
    """
    Create a row representation of an image from an image array and (optional) imageType.

    to_image_udf = udf(arrayToImageRow, imageSchema)
    df.withColumn("output_img", to_image_udf(df["np_arr_col"])

    :param imgArray: ndarray, image data.
    :param sparkMode: spark mode, type information for the image, will be inferred from array if
                      the mode is not provided. See SparkMode for valid modes.
    :return: Row, image as a DataFrame Row.
    """
    # Sometimes tensors have a leading "batch-size" dimension. Assume to be 1 if it exists.
    if len(imgArray.shape) == 4:
        if imgArray.shape[0] != 1:
            raise ValueError("The first dimension of a 4-d image array is expected to be 1.")
        imgArray = imgArray.reshape(imgArray.shape[1:])

    if sparkMode is None:
        sparkMode = _arrayToSparkMode(imgArray)
    imageType = sparkModeLookup[sparkMode]

    height, width, nChannels = imgArray.shape
    if imageType.nChannels != nChannels:
        msg = "Image of type {} should have {} channels, but array has {} channels."
        raise ValueError(msg.format(sparkMode, imageType.nChannels, nChannels))

    # Convert the array to match the image type.
    if not np.can_cast(imgArray, imageType.dtype, 'same_kind'):
        msg = "Array of type {} cannot safely be cast to image type {}."
        raise ValueError(msg.format(imgArray.dtype, imageType.dtype))
    imgArray = np.array(imgArray, dtype=imageType.dtype, copy=False)

    data = bytearray(imgArray.tobytes())
    return Row(mode=sparkMode, height=height, width=width, nChannels=nChannels, data=data)

Developer ID: mateiz, Project: spark-deep-learning, Lines of code: 35, Source file: imageIO.py
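The conversion guard uses the 'same_kind' rule, which allows safe casts and casts "upward" across kinds (e.g. integer data into a float image type) but refuses to push float data down into an integer image type. A small illustration with plain dtypes:

import numpy as np

np.can_cast(np.uint8, np.float64, 'same_kind')    # True  -- integer data may become float
np.can_cast(np.float64, np.float32, 'same_kind')  # True  -- stays within the float kind
np.can_cast(np.float64, np.uint8, 'same_kind')    # False -- float data cannot become an unsigned int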
Example 19: test_simply_typed_space_validate

def test_simply_typed_space_validate(space, batch_dtype, is_numeric):
    """
    Creates a batch of batch_dtype, and sees if space validates it.
    """
    assert isinstance(space, SimplyTypedSpace), \
        "%s is not a SimplyTypedSpace" % type(space)

    batch_sizes = (1, 3)

    if not is_numeric and isinstance(space, VectorSpace) and space.sparse:
        batch_sizes = (None, )

    for batch_size in batch_sizes:
        if is_numeric:
            batch = space.get_origin_batch(dtype=batch_dtype,
                                           batch_size=batch_size)
        else:
            batch = space.make_theano_batch(dtype=batch_dtype,
                                            batch_size=batch_size,
                                            name="test batch to validate")

        # Expect an error if space.dtype is not None and batch can't cast
        # to it.
        if space.dtype is not None and \
           not np.can_cast(batch.dtype, space.dtype):
            np.testing.assert_raises(TypeError,
                                     space._validate,
                                     (is_numeric, batch))
        else:
            # Otherwise, don't expect an error.
            space._validate(is_numeric, batch)

Developer ID: DevSinghSachan, Project: pylearn2, Lines of code: 31, Source file: test_space.py
Example 20: Op

def Op(self, opstr, indx, J, dtype, *args):
    row = _np.array(self._basis, dtype=self._dtype)
    col = _np.array(self._basis, dtype=self._dtype)
    ME = _np.ones((self._Ns,), dtype=dtype)

    if len(opstr) != len(indx):
        raise ValueError('length of opstr does not match length of indx')
    if not _np.can_cast(J, _np.dtype(dtype)):
        raise TypeError("can't cast J to proper dtype")

    for o in opstr[::-1]:
        if o == "I":
            continue
        elif o == "n":
            ME *= dtype(_np.abs(col))
        elif o == "+":
            col += 1
            ME *= _np.sqrt(dtype(_np.abs(col)))
        elif o == "-":
            ME *= _np.sqrt(dtype(_np.abs(col)))
            col -= 1
        else:
            raise Exception("operator symbol {0} not recognized".format(o))

    mask = (col < 0)
    mask += (col > (self._Ns))
    ME[mask] *= 0.0

    if J != 1.0:
        ME *= J

    return ME, row, col

Developer ID: zenonofelea, Project: exact_diag_py, Lines of code: 33, Source file: photon.py
Note: the numpy.can_cast examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not republish without permission.