本文整理汇总了Python中numpy.bytes_函数的典型用法代码示例。如果您正苦于以下问题:Python bytes_函数的具体用法?Python bytes_怎么用?Python bytes_使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bytes_函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _convert_to_numpy_bytes
def _convert_to_numpy_bytes(s):
if isinstance(s, np.bytes_):
return s
elif isinstance(s, bytes):
return np.bytes_(s)
else:
return np.bytes_(s.encode())
开发者ID:frejanordsiek,项目名称:Acquisition_HDF5,代码行数:7,代码来源:Acquisition_HDF5.py
示例2: getAttr
def getAttr(dataset, ky):
    """Return HDF5 attribute `ky` of `dataset` as a clean Python string.

    The raw attribute bytes are decoded, trailing whitespace is stripped,
    and anything after the first embedded NUL is discarded (fixed-width
    HDF5 strings are commonly NUL padded).

    Raises KeyError (after printing the available keys) when `ky` is
    missing.
    """
    try:
        # Single lookup; the original fetched the attribute twice and
        # left the first result in an unused local.
        attrib = dataset.attrs[ky]
    except KeyError:
        print( "HDF5 key not found: '"+ky+"'. Available keys:\n")
        print( list(dataset.attrs.keys()) )
        raise
    return np.bytes_(attrib).decode().rstrip().split("\x00")[0]
开发者ID:OpendTect,项目名称:OpendTect,代码行数:8,代码来源:hdf5.py
示例3: setUp
def setUp(self):
    """Create one fixture literal of each bytes/str flavour used by the tests."""
    # The original body began with a stray `pass` (dead statement) and
    # used np.unicode_, a deprecated alias of np.str_ that was removed in
    # NumPy 2.0. np.str_ constructs the identical scalar type.
    self.b_lit = b'bytes literal'
    self.s_lit = 'literal literal'
    self.u_lit = u'unicode literal'
    self.np_b_lit = np.bytes_('numpy bytes literal')
    self.np_s_lit = np.str_('numpy unicode literal')
    self.np_u_lit = np.str_('numpy unicode literal')
开发者ID:titusjan,项目名称:argos,代码行数:9,代码来源:test_utils.py
示例4: test_isscalar_numpy_array_scalars
def test_isscalar_numpy_array_scalars(self):
    """lib.isscalar must report True for every numpy scalar flavour."""
    self.assertTrue(lib.isscalar(np.int64(1)))
    self.assertTrue(lib.isscalar(np.float64(1.0)))
    self.assertTrue(lib.isscalar(np.int32(1)))
    self.assertTrue(lib.isscalar(np.object_("foobar")))
    self.assertTrue(lib.isscalar(np.str_("foobar")))
    # np.unicode_ was a deprecated alias of np.str_ (removed in
    # NumPy 2.0); np.str_ builds the identical scalar.
    self.assertTrue(lib.isscalar(np.str_(u("foobar"))))
    self.assertTrue(lib.isscalar(np.bytes_(b"foobar")))
    self.assertTrue(lib.isscalar(np.datetime64("2014-01-01")))
    self.assertTrue(lib.isscalar(np.timedelta64(1, "h")))
开发者ID:Feyi1,项目名称:pandas,代码行数:10,代码来源:test_infer_and_convert.py
示例5: test_isscalar_numpy_array_scalars
def test_isscalar_numpy_array_scalars(self):
    """is_scalar must report True for every numpy scalar flavour."""
    self.assertTrue(is_scalar(np.int64(1)))
    self.assertTrue(is_scalar(np.float64(1.)))
    self.assertTrue(is_scalar(np.int32(1)))
    self.assertTrue(is_scalar(np.object_('foobar')))
    self.assertTrue(is_scalar(np.str_('foobar')))
    # np.unicode_ was a deprecated alias of np.str_ (removed in
    # NumPy 2.0); np.str_ builds the identical scalar.
    self.assertTrue(is_scalar(np.str_(u('foobar'))))
    self.assertTrue(is_scalar(np.bytes_(b'foobar')))
    self.assertTrue(is_scalar(np.datetime64('2014-01-01')))
    self.assertTrue(is_scalar(np.timedelta64(1, 'h')))
开发者ID:cgrin,项目名称:pandas,代码行数:10,代码来源:test_inference.py
示例6: random_numpy
def random_numpy(shape, dtype, allow_nan=True,
                 allow_unicode=False):
    """Make a random numpy array of the specified shape and dtype string.

    For 'S', 'U', and 'object' an array of the specified size is made and
    each element is set individually (a numpy.bytes_, numpy.str_, or a
    randomly typed random subarray respectively). Any other dtype is
    built by reinterpreting a random byte buffer (each byte forced to
    0/1 for bool). When `allow_nan` is False, NaNs in float/complex
    output are zeroed. `allow_unicode` widens the character range used
    for the string dtypes.
    """
    if dtype == 'S':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='S' + str(length))
        for x in np.nditer(data, op_flags=['readwrite']):
            if allow_unicode:
                chars = random_bytes_fullrange(length)
            else:
                chars = random_bytes(length)
            x[...] = np.bytes_(chars)
        return data
    elif dtype == 'U':
        length = random.randint(1, max_string_length)
        data = np.zeros(shape=shape, dtype='U' + str(length))
        for x in np.nditer(data, op_flags=['readwrite']):
            if allow_unicode:
                chars = _random_str_some_unicode(length)
            else:
                chars = random_str_ascii(length)
            # np.unicode_ was a deprecated alias of np.str_ (removed in
            # NumPy 2.0); np.str_ is the identical type.
            x[...] = np.str_(chars)
        return data
    elif dtype == 'object':
        data = np.zeros(shape=shape, dtype='object')
        for index, x in np.ndenumerate(data):
            data[index] = random_numpy(
                shape=random_numpy_shape(
                    object_subarray_dimensions,
                    max_object_subarray_axis_length),
                dtype=random.choice(dtypes))
        return data
    else:
        # Size one element to learn the itemsize, then fill the whole
        # array from a random byte buffer.
        nbytes = np.ndarray(shape=(1,), dtype=dtype).nbytes
        bts = np.random.bytes(nbytes * np.prod(shape))
        if dtype == 'bool':
            # bool storage must be exactly 0 or 1 per byte.
            bts = b''.join([{True: b'\x01', False: b'\x00'}[
                ch > 127] for ch in bts])
        data = np.ndarray(shape=shape, dtype=dtype, buffer=bts)
        # If it is a floating point type and we are supposed to
        # remove NaN's, then turn them to zeros.
        if not allow_nan and data.dtype.kind in ('f', 'c') \
                and np.any(np.isnan(data)):
            data = data.copy()
            data[np.isnan(data)] = 0.0
        return data
开发者ID:dashesy,项目名称:hdf5storage,代码行数:55,代码来源:make_randoms.py
示例7: logfile
def logfile(self, logfile):
    # Setter: write `logfile` into the fixed-width Fortran character
    # buffer self.radex.setup.logfile (exposed by f2py).
    if PYVERSION == 3:
        try:
            # Blank the buffer. NOTE(review): np.bytes_ of a list of str
            # raises TypeError in Python 3, which appears to be the
            # intended signal that f2py exposed the field as one
            # fixed-width string rather than an array of S1 -- confirm.
            self.radex.setup.logfile[:] = np.bytes_([""]*len(self.radex.setup.logfile))
        except TypeError as ex:
            # Single fixed-width string case: overwrite with spaces of
            # the exact item size.
            self.radex.setup.logfile = " " * self.radex.setup.logfile.dtype.itemsize
    else:
        self.radex.setup.logfile[:] = ""
    try:
        # Array-of-characters case: copy into the leading slots.
        self.radex.setup.logfile[:len(logfile)] = logfile
    except IndexError:
        # Single fixed-width string case: both sides of the assignment
        # must be exactly the same size, hence the space padding.
        self.radex.setup.logfile = logfile + " " * (self.radex.setup.logfile.dtype.itemsize - len(logfile))
开发者ID:keflavich,项目名称:pyradex,代码行数:12,代码来源:core.py
示例8: outfile
def outfile(self, outfile):
    # Setter: write `outfile` into the fixed-width Fortran character
    # buffer self.radex.impex.outfile (exposed by f2py).
    if PYVERSION == 3:
        try:
            # Blank the buffer. NOTE(review): np.bytes_ of a list of str
            # raises TypeError in Python 3; the except branch handles
            # the case where f2py exposed the field as one fixed-width
            # string rather than an array of S1 -- confirm.
            self.radex.impex.outfile[:] = np.bytes_([""]*len(self.radex.impex.outfile))
        except TypeError as ex:
            # Single fixed-width string case: overwrite with spaces of
            # the exact item size.
            self.radex.impex.outfile = " " * self.radex.impex.outfile.dtype.itemsize
    else:
        self.radex.impex.outfile[:] = ""
    try:
        # Array-of-characters case: copy into the leading slots.
        self.radex.impex.outfile[:len(outfile)] = outfile
    except IndexError:
        # Single fixed-width string case: both sides of the assignment
        # must be exactly the same size, hence the space padding.
        self.radex.impex.outfile = outfile + " " * (self.radex.impex.outfile.dtype.itemsize - len(outfile))
开发者ID:keflavich,项目名称:pyradex,代码行数:12,代码来源:core.py
示例9: search_for_string
def search_for_string(h5_str, value):
    """Return True if `value` matches `h5_str` or any element of it.

    `h5_str` may be None (never matches), a plain/numpy string scalar
    (compared directly), or a list/ndarray whose elements are compared
    against both `value` and its ``numpy.bytes_`` encoding, since HDF5
    string data frequently reads back as bytes. Any other type yields
    False.
    """
    if h5_str is None:
        return False
    # np.string_ was removed in NumPy 2.0; np.bytes_ is the same type.
    if isinstance(h5_str, (str, np.bytes_)):
        return h5_str == value
    if isinstance(h5_str, (list, np.ndarray)):
        # Hoisted out of the loop: the original re-encoded `value` on
        # every iteration.
        bvalue = np.bytes_(value)
        return any(item == value or item == bvalue for item in h5_str)
    return False
开发者ID:deep-introspection,项目名称:nwb-api,代码行数:13,代码来源:test_utils.py
示例10: random_numpy_scalar
def random_numpy_scalar(dtype):
    """Make a random numpy scalar of the given dtype string.

    The string dtypes ('S', 'U') get a random string of random length up
    to max_string_length; every other dtype delegates to random_numpy
    and extracts the value from the resulting 0-d array.
    """
    if dtype == 'S':
        return np.bytes_(random_bytes(random.randint(1,
                         max_string_length)))
    elif dtype == 'U':
        # np.unicode_ was a deprecated alias of np.str_ (removed in
        # NumPy 2.0); np.str_ builds the identical scalar.
        return np.str_(random_str_ascii(
            random.randint(1, max_string_length)))
    else:
        return random_numpy(tuple(), dtype)[()]
开发者ID:dashesy,项目名称:hdf5storage,代码行数:13,代码来源:make_randoms.py
示例11: check_shaderError
def check_shaderError(shader, flag, isProgram, errorMessage):
    # Query a GL status flag for a shader or program via Blender's bgl
    # wrapper; on failure, print the driver's info log and abort.
    success = bgl.Buffer(bgl.GL_INT, 1)
    if isProgram:
        bgl.glGetProgramiv(shader, flag, success)
    else:
        bgl.glGetShaderiv(shader, flag, success)
    if success[0] == bgl.GL_FALSE:
        import numpy as np
        import ctypes
        # NOTE(review): builds a bgl.Buffer over a ctypes array mapped at
        # address 0 to serve as the "length written" out-parameter. This
        # relies on bgl/ctypes internals and dereferencing address 0
        # looks unsafe -- confirm against the bgl.Buffer API.
        offset = bgl.Buffer(bgl.GL_INT, 1, (ctypes.c_int32 * 1).from_address(0))
        error = bgl.Buffer(bgl.GL_BYTE, 1024)
        if isProgram:
            bgl.glGetProgramInfoLog(shader, 1024, offset, error)
            # np.bytes_ flattens the byte buffer into a bytes string.
            print(errorMessage, np.bytes_(error).decode("utf-8"))
        else:
            bgl.glGetShaderInfoLog(shader, 1024, offset, error)
            print(errorMessage, np.bytes_(error).decode("utf-8"))
        del offset
        # NOTE(review): a bare `raise` with no active exception raises
        # "RuntimeError: No active exception to re-raise" -- presumably
        # intentional as an abort, but the commented-out RuntimeError
        # suggests this was left unfinished.
        raise #RuntimeError(errorMessage, bgl.glGetShaderInfoLog(shader))
开发者ID:mgschwan,项目名称:blensor,代码行数:23,代码来源:utils_shader.py
示例12: molpath
def molpath(self, molfile):
    # Setter: validate the molecular data file and store its path into
    # the fixed-width Fortran character buffer self.radex.impex.molfile
    # (exposed by f2py).
    if "~" in molfile:
        molfile = os.path.expanduser(molfile)
    if PYVERSION == 3:
        try:
            # Blank the buffer. NOTE(review): np.bytes_ of a list of str
            # raises TypeError in Python 3; the except branch handles the
            # case where f2py exposed the field as one fixed-width string
            # rather than an array of S1 -- confirm.
            self.radex.impex.molfile[:] = np.bytes_([""]*len(self.radex.impex.molfile))
        except TypeError as ex:
            self.radex.impex.molfile = " " * self.radex.impex.molfile.dtype.itemsize
    else:
        self.radex.impex.molfile[:] = ""
    # Raises (presumably) if the collision-rate file is missing/invalid.
    utils.verify_collisionratefile(molfile)
    try:
        # Array-of-characters case: copy into the leading slots.
        self.radex.impex.molfile[:len(molfile)] = molfile
    except IndexError:
        # Single fixed-width string case: both sides of the assignment
        # must be exactly the same size, hence the space padding.
        self.radex.impex.molfile = molfile + " " * (self.radex.impex.molfile.dtype.itemsize - len(molfile))
开发者ID:keflavich,项目名称:pyradex,代码行数:15,代码来源:core.py
示例13: set_attribute_string
def set_attribute_string(target, name, value):
    """ Sets an attribute to a string on a Dataset or Group.

    If the attribute `name` doesn't exist yet, it is created. If it
    already exists, it is overwritten if it differs from `value`.

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the string attribute of.
    name : str
        Name of the attribute to set.
    value : string
        Value to set the attribute to. Can be any sort of string type
        that will convert to a ``numpy.bytes_``
    """
    encoded = np.bytes_(value)
    set_attribute(target, name, encoded)
开发者ID:dashesy,项目名称:hdf5storage,代码行数:18,代码来源:utilities.py
示例14: datapath
def datapath(self, radat):
    # Setter: store the data path into the fixed-width Fortran character
    # buffer self.radex.setup.radat (exposed by f2py).
    # self.radex data path not needed if molecule given as full path
    if PYVERSION == 3:
        try:
            # Blank the buffer. NOTE(review): np.bytes_ of a list of str
            # raises TypeError in Python 3, which triggers the fallback
            # below -- confirm this is the intended mechanism.
            self.radex.setup.radat[:] = np.bytes_([""] * len(self.radex.setup.radat))
        except TypeError as ex:
            # now radat gets treated as a single S120 instead of an array of S1s
            self.radex.setup.radat = " " * self.radex.setup.radat.dtype.itemsize
    else:
        self.radex.setup.radat[:] = ""
    # there is dangerous magic here: radat needs to be interpreted as an array,
    # but you can't make it an array of characters easily...
    try:
        self.radex.setup.radat[:len(radat)] = radat
    except IndexError:
        # in python3, this might just work, where the above doesn't?
        # (this works if RADAT is an S120)
        # the added space is because the right and left side must have *exactly* the same size
        self.radex.setup.radat = radat + " " * (self.radex.setup.radat.dtype.itemsize - len(radat))
示例15: check_dict_like_other_type_key
def check_dict_like_other_type_key(self, tp, other_tp):
    """Round-trip a dict-like of type `tp` carrying one extra key whose
    type is selected by `other_tp`, and assert the readback equals the
    original.
    """
    data = random_dict(tp)
    key_gen = random_str_some_unicode(max_dict_key_length)
    if other_tp == 'numpy.bytes_':
        key = np.bytes_(key_gen.encode('UTF-8'))
    elif other_tp == 'numpy.unicode_':
        # np.unicode_ is a deprecated alias of np.str_ (removed in
        # NumPy 2.0); np.str_ builds the identical scalar.
        key = np.str_(key_gen)
    elif other_tp == 'bytes':
        key = key_gen.encode('UTF-8')
    elif other_tp == 'int':
        key = random_int()
    elif other_tp == 'float':
        key = random_float()
    data[key] = random_int()
    out = self.write_readback(data, random_name(),
                              self.options)
    self.assert_equal(out, data)
示例16: check_string_type_non_str_key
def check_string_type_non_str_key(tp, other_tp, option_keywords):
    """Write a dict-like of type `tp` containing one non-str key of type
    `other_tp` to a temporary HDF5 file and verify the set of stored key
    names matches expectations.
    """
    options = hdf5storage.Options(**option_keywords)
    key_value_names = (options.dict_like_keys_name,
                       options.dict_like_values_name)
    data = random_dict(tp)
    # The reserved keys/values field names must not collide with real keys.
    for k in key_value_names:
        if k in data:
            del data[k]
    keys = list(data.keys())
    key_gen = random_str_some_unicode(max_dict_key_length)
    if other_tp == 'numpy.bytes_':
        key = np.bytes_(key_gen.encode('UTF-8'))
    elif other_tp == 'numpy.unicode_':
        # np.unicode_ is a deprecated alias of np.str_ (removed in
        # NumPy 2.0); np.str_ builds the identical scalar.
        key = np.str_(key_gen)
    elif other_tp == 'bytes':
        key = key_gen.encode('UTF-8')
    data[key] = random_int()
    keys.append(key_gen)
    # Make a random name.
    name = random_name()
    # Write the data to the proper file with the given name with the
    # provided options. The file needs to be deleted after to keep junk
    # from building up.
    fld = None
    try:
        fld = tempfile.mkstemp()
        os.close(fld[0])
        filename = fld[1]
        hdf5storage.write(data, path=name, filename=filename,
                          options=options)
        # Open read-only: relying on h5py's historical default mode is
        # deprecated, and the file is only read here.
        with h5py.File(filename, 'r') as f:
            assert_equal_nose(set(keys), set(f[name].keys()))
    finally:
        # The original's `except: raise` was a no-op; `finally` alone
        # guarantees cleanup of the temporary file.
        if fld is not None:
            os.remove(fld[1])
示例17: convert_to_numpy_bytes
def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 econded string (bytes_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.str_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
numpy.bytes_ or numpy.ndarray of numpy.bytes_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data)
elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8'))
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8'))
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
# needs to be have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1',
buffer=data.flatten().tostring())[()]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape,
dtype='S' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()])
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
# dimentions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],)
#.........这里部分代码省略.........
开发者ID:dashesy,项目名称:hdf5storage,代码行数:101,代码来源:utilities.py
示例18: assert_equal_none_format
def assert_equal_none_format(a, b, options=None):
# Compares a and b for equality. b is always the original. If they
# are dictionaries, a must be a structured ndarray and they must
# have the same set of keys, after which they values must all be
# compared. If they are a collection type (list, tuple, set,
# frozenset, or deque), then the compairison must be made with b
# converted to an object array. If the original is not a numpy type
# (isn't or doesn't inherit from np.generic or np.ndarray), then it
# is a matter of converting it to the appropriate numpy
# type. Otherwise, both are supposed to be numpy types. For object
# arrays, each element must be iterated over to be compared. Then,
# if it isn't a string type, then they must have the same dtype,
# shape, and all elements. If it is an empty string, then it would
# have been stored as just a null byte (recurse to do that
# comparison). If it is a bytes_ type, the dtype, shape, and
# elements must all be the same. If it is string_ type, we must
# convert to uint32 and then everything can be compared. Big longs
# and ints get written as numpy.bytes_.
if type(b) == dict or (sys.hexversion >= 0x2070000
and type(b) == collections.OrderedDict):
assert type(a) == np.ndarray
assert a.dtype.names is not None
# Determine if any of the keys could not be stored as str. If
# they all can be, then the dtype field names should be the
# keys. Otherwise, they should be 'keys' and 'values'.
all_str_keys = True
if sys.hexversion >= 0x03000000:
tp_str = str
tp_bytes = bytes
converters = {tp_str: lambda x: x,
tp_bytes: lambda x: x.decode('UTF-8'),
np.bytes_:
lambda x: bytes(x).decode('UTF-8'),
np.unicode_: lambda x: str(x)}
tp_conv = lambda x: converters[type(x)](x)
tp_conv_str = lambda x: tp_conv(x)
else:
tp_str = unicode
tp_bytes = str
converters = {tp_str: lambda x: x,
tp_bytes: lambda x: x.decode('UTF-8'),
np.bytes_:
lambda x: bytes(x).decode('UTF-8'),
np.unicode_: lambda x: unicode(x)}
tp_conv = lambda x: converters[type(x)](x)
tp_conv_str = lambda x: tp_conv(x).encode('UTF-8')
tps = tuple(converters.keys())
for k in b.keys():
if type(k) not in tps:
all_str_keys = False
break
try:
k_str = tp_conv(k)
except:
all_str_keys = False
break
if all_str_keys:
assert set(a.dtype.names) == set([tp_conv_str(k)
for k in b.keys()])
for k in b:
assert_equal_none_format(a[tp_conv_str(k)][0],
b[k], options)
else:
names = (options.dict_like_keys_name,
options.dict_like_values_name)
assert set(a.dtype.names) == set(names)
keys = a[names[0]]
values = a[names[1]]
assert_equal_none_format(keys, tuple(b.keys()), options)
assert_equal_none_format(values, tuple(b.values()), options)
elif type(b) in (list, tuple, set, frozenset, collections.deque):
assert_equal_none_format(a, np.object_(list(b)), options)
elif not isinstance(b, (np.generic, np.ndarray)):
if b is None:
# It should be np.float64([])
assert type(a) == np.ndarray
assert a.dtype == np.float64([]).dtype
assert a.shape == (0, )
elif (sys.hexversion >= 0x03000000 \
and isinstance(b, (bytes, bytearray))) \
or (sys.hexversion < 0x03000000 \
and isinstance(b, (bytes, bytearray))):
assert a == np.bytes_(b)
elif (sys.hexversion >= 0x03000000 \
and isinstance(b, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(b, unicode)):
assert_equal_none_format(a, np.unicode_(b), options)
elif (sys.hexversion >= 0x03000000 \
and type(b) == int) \
or (sys.hexversion < 0x03000000 \
and type(b) == long):
if b > 2**63 or b < -(2**63 - 1):
assert_equal_none_format(a, np.bytes_(b), options)
else:
assert_equal_none_format(a, np.int64(b), options)
else:
assert_equal_none_format(a, np.array(b)[()], options)
else:
#.........这里部分代码省略.........
开发者ID:sungjinlees,项目名称:hdf5storage,代码行数:101,代码来源:asserts.py
示例19: assert_equal_matlab_format
def assert_equal_matlab_format(a, b, options=None):
# Compares a and b for equality. b is always the original. If they
# are dictionaries, a must be a structured ndarray and they must
# have the same set of keys, after which they values must all be
# compared. If they are a collection type (list, tuple, set,
# frozenset, or deque), then the compairison must be made with b
# converted to an object array. If the original is not a numpy type
# (isn't or doesn't inherit from np.generic or np.ndarray), then it
# is a matter of converting it to the appropriate numpy
# type. Otherwise, both are supposed to be numpy types. For object
# arrays, each element must be iterated over to be compared. Then,
# if it isn't a string type, then they must have the same dtype,
# shape, and all elements. All strings are converted to numpy.str_
# on read unless they were stored as a numpy.bytes_ due to having
# non-ASCII characters. If it is empty, it has shape (1, 0). A
# numpy.str_ has all of its strings per row compacted together. A
# numpy.bytes_ string has to have the same thing done, but then it
# needs to be converted up to UTF-32 and to numpy.str_ through
# uint32. Big longs and ints end up getting converted to UTF-16
# uint16's when written and read back as UTF-32 numpy.unicode_.
#
# In all cases, we expect things to be at least two dimensional
# arrays.
if type(b) == dict or (sys.hexversion >= 0x2070000
and type(b) == collections.OrderedDict):
assert type(a) == np.ndarray
assert a.dtype.names is not None
# Determine if any of the keys could not be stored as str. If
# they all can be, then the dtype field names should be the
# keys. Otherwise, they should be 'keys' and 'values'.
all_str_keys = True
if sys.hexversion >= 0x03000000:
tp_str = str
tp_bytes = bytes
converters = {tp_str: lambda x: x,
tp_bytes: lambda x: x.decode('UTF-8'),
np.bytes_:
lambda x: bytes(x).decode('UTF-8'),
np.unicode_: lambda x: str(x)}
tp_conv = lambda x: converters[type(x)](x)
tp_conv_str = lambda x: tp_conv(x)
else:
tp_str = unicode
tp_bytes = str
converters = {tp_str: lambda x: x,
tp_bytes: lambda x: x.decode('UTF-8'),
np.bytes_:
lambda x: bytes(x).decode('UTF-8'),
np.unicode_: lambda x: unicode(x)}
tp_conv = lambda x: converters[type(x)](x)
tp_conv_str = lambda x: tp_conv(x).encode('UTF-8')
tps = tuple(converters.keys())
for k in b.keys():
if type(k) not in tps:
all_str_keys = False
break
try:
k_str = tp_conv(k)
except:
all_str_keys = False
break
if all_str_keys:
assert set(a.dtype.names) == set([tp_conv_str(k)
for k in b.keys()])
for k in b:
assert_equal_matlab_format(a[tp_conv_str(k)][0],
b[k], options)
else:
names = (options.dict_like_keys_name,
options.dict_like_values_name)
assert set(a.dtype.names) == set(names)
keys = a[names[0]][0]
values = a[names[1]][0]
assert_equal_matlab_format(keys, tuple(b.keys()), options)
assert_equal_matlab_format(values, tuple(b.values()),
options)
elif type(b) in (list, tuple, set, frozenset, collections.deque):
assert_equal_matlab_format(a, np.object_(list(b)), options)
elif not isinstance(b, (np.generic, np.ndarray)):
if b is None:
# It should be np.zeros(shape=(0, 1), dtype='float64'))
assert type(a) == np.ndarray
assert a.dtype == np.dtype('float64')
assert a.shape == (1, 0)
elif (sys.hexversion >= 0x03000000 \
and isinstance(b, (bytes, str, bytearray))) \
or (sys.hexversion < 0x03000000 \
and isinstance(b, (bytes, unicode, bytearray))):
if len(b) == 0:
assert_equal(a, np.zeros(shape=(1, 0), dtype='U'),
options)
elif isinstance(b, (bytes, bytearray)):
try:
c = np.unicode_(b.decode('ASCII'))
except:
c = np.bytes_(b)
assert_equal(a, np.atleast_2d(c), options)
else:
assert_equal(a, np.atleast_2d(np.unicode_(b)), options)
#.........这里部分代码省略.........
开发者ID:sungjinlees,项目名称:hdf5storage,代码行数:101,代码来源:asserts.py
示例20: _all_bytes
# Maps numpy dtype numbers to their serialized field type names.
numpy_dtype_to_field_mapping = {
    np.float64().dtype.num: 'double',
    np.float32().dtype.num: 'float',
    np.bool_().dtype.num: 'bit',
    np.uint8().dtype.num: 'unsignedByte',
    np.int16().dtype.num: 'short',
    np.int32().dtype.num: 'int',
    np.int64().dtype.num: 'long',
    np.complex64().dtype.num: 'floatComplex',
    np.complex128().dtype.num: 'doubleComplex',
    # np.unicode_ was a deprecated alias of np.str_ (removed in
    # NumPy 2.0); np.str_ yields the same dtype number.
    np.str_().dtype.num: 'unicodeChar'
}

# Byte strings map to 'char'. Registered after the literal, as in the
# original -- presumably so the entry behaves correctly where np.bytes_
# and the unicode type shared a dtype number (Python 2); confirm.
numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char'
def _all_bytes(column):
for x in column:
if not isinstance(x, bytes):
return False
return True
def _all_unicode(column):
    """Return True when every element of `column` is a text (unicode) string."""
    return all(isinstance(item, six.text_type) for item in column)
开发者ID:astrosilverio,项目名称:astropy,代码行数:29,代码来源:converters.py
注:本文中的numpy.bytes_函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论