
Python numpy.void Function Code Examples


This article collects typical code examples of the numpy.void function in Python, drawn from open-source projects. If you are wondering what numpy.void does, how to use it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the void function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
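
Before diving into the project examples, here is a minimal standalone sketch (not taken from any project below) of what numpy.void itself does: it wraps raw bytes in an opaque fixed-size scalar, which is why many of the examples that follow use it to store pickled or compressed blobs in HDF5 files.

import numpy as np

# Wrapping bytes yields an opaque scalar whose dtype records the byte length.
blob = np.void(b"\x10\x20\x30")
print(blob.dtype)      # V3
print(blob.tobytes())  # recovers the original three bytes

# An integer argument instead creates a zero-filled void of that many bytes.
print(np.void(4).tobytes())  # b'\x00\x00\x00\x00'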

Example 1: check_numpy_scalar_argument_return_void

 def check_numpy_scalar_argument_return_void(self):
     f = PyCFunction('foo')
     f += Variable('a1', numpy.void, 'in, out')
     f += Variable('a2', numpy.void, 'in, out')
     foo = f.build()
     args = ('he', 4)
     results = (numpy.void('he'), numpy.void(4))
     assert_equal(foo(*args), results)
Developer: dagss | Project: f2py-g3 | Lines: 8 | Source: test_py_support.py


Example 2: test_meta_nonempty

def test_meta_nonempty():
    df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
                        'B': list('abc'),
                        'C': 'bar',
                        'D': np.float32(1),
                        'E': np.int32(1),
                        'F': pd.Timestamp('2016-01-01'),
                        'G': pd.date_range('2016-01-01', periods=3,
                                           tz='America/New_York'),
                        'H': pd.Timedelta('1 hours', 'ms'),
                        'I': np.void(b' '),
                        'J': pd.Categorical([UNKNOWN_CATEGORIES] * 3)},
                       columns=list('DCBAHGFEIJ'))
    df2 = df1.iloc[0:0]
    df3 = meta_nonempty(df2)
    assert (df3.dtypes == df2.dtypes).all()
    assert df3['A'][0] == 'Alice'
    assert df3['B'][0] == 'foo'
    assert df3['C'][0] == 'foo'
    assert df3['D'][0] == np.float32(1)
    assert df3['D'][0].dtype == 'f4'
    assert df3['E'][0] == np.int32(1)
    assert df3['E'][0].dtype == 'i4'
    assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00')
    assert df3['G'][0] == pd.Timestamp('1970-01-01 00:00:00',
                                       tz='America/New_York')
    assert df3['H'][0] == pd.Timedelta('1', 'ms')
    assert df3['I'][0] == 'foo'
    assert df3['J'][0] == UNKNOWN_CATEGORIES

    s = meta_nonempty(df2['A'])
    assert s.dtype == df2['A'].dtype
    assert (df3['A'] == s).all()
Developer: rlugojr | Project: dask | Lines: 33 | Source: test_utils_dataframe.py


Example 3: _convert_value

    def _convert_value(self, value):
        """Convert a string into a numpy object (scalar or array).

        The value is most of the time a string, but it can be a Python
        object, for example when it comes from the TIFF decoder.
        """
        if isinstance(value, list):
            # convert to a numpy array
            return numpy.array(value)
        if isinstance(value, dict):
            # convert to a numpy associative array
            key_dtype = numpy.min_scalar_type(list(value.keys()))
            value_dtype = numpy.min_scalar_type(list(value.values()))
            associative_type = [('key', key_dtype), ('value', value_dtype)]
            assert key_dtype.kind != "O" and value_dtype.kind != "O"
            return numpy.array(list(value.items()), dtype=associative_type)
        if isinstance(value, numbers.Number):
            dtype = numpy.min_scalar_type(value)
            assert dtype.kind != "O"
            return dtype.type(value)

        if isinstance(value, six.binary_type):
            try:
                value = value.decode('utf-8')
            except UnicodeDecodeError:
                return numpy.void(value)

        if " " in value:
            result = self._convert_list(value)
        else:
            result = self._convert_scalar_value(value)
        return result
Developer: vallsv | Project: silx | Lines: 32 | Source: fabioh5.py


Example 4: test_meta_nonempty

def test_meta_nonempty():
    df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
                        'B': list('abc'),
                        'C': 'bar',
                        'D': 3.0,
                        'E': pd.Timestamp('2016-01-01'),
                        'F': pd.date_range('2016-01-01', periods=3,
                                           tz='America/New_York'),
                        'G': pd.Timedelta('1 hours'),
                        'H': np.void(b' ')},
                       columns=list('DCBAHGFE'))
    df2 = df1.iloc[0:0]
    df3 = meta_nonempty(df2)
    assert (df3.dtypes == df2.dtypes).all()
    assert df3['A'][0] == 'Alice'
    assert df3['B'][0] == 'foo'
    assert df3['C'][0] == 'foo'
    assert df3['D'][0] == 1.0
    assert df3['E'][0] == pd.Timestamp('1970-01-01 00:00:00')
    assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00',
                                       tz='America/New_York')
    assert df3['G'][0] == pd.Timedelta('1 days')
    assert df3['H'][0] == 'foo'

    s = meta_nonempty(df2['A'])
    assert s.dtype == df2['A'].dtype
    assert (df3['A'] == s).all()
Developer: ankravch | Project: dask | Lines: 27 | Source: test_utils_dataframe.py


Example 5: _saveValue

 def _saveValue(self, group, name, value):
     # we pickle to a string and convert to numpy.void,
     # because HDF5 has some limitations as to which strings it can serialize
     # (see http://docs.h5py.org/en/latest/strings.html)
     pickled = numpy.void(pickle.dumps(value, 0))
     dset = group.create_dataset(name, data=pickled)
     dset.attrs['version'] = self._version
     self._failed_to_deserialize = False
Developer: ilastik | Project: ilastik | Lines: 8 | Source: appletSerializer.py
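
The matching load path is not part of this excerpt. A possible counterpart (a hypothetical sketch, assuming the same pickle import as the class above; the name _loadValue is made up) reads the opaque dataset back and unpickles its bytes:

 def _loadValue(self, group, name):
     # The dataset holds a numpy.void scalar; recover its raw bytes
     # and unpickle them to get the original Python object back.
     pickled = group[name][()]
     return pickle.loads(pickled.tobytes())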


Example 6: store

    def store(self, k, v):
        logging.info("{} storing {}".format(self.TAG, k))
        v_ = np.void(zlib.compress(cPickle.dumps(v, protocol=cPickle.HIGHEST_PROTOCOL)))

        if k in self.db:
            logging.error("{} Overwriting group {}!".format(self.TAG, k))
            del self.db[k]

        self.db[k] = [v_]
Developer: jonathanmasci | Project: EG16_tutorial | Lines: 9 | Source: snapshotter.py


Example 7: serialize_hdf5

    def serialize_hdf5(self, h5py_group):
        logger.debug("Serializing")
        h5py_group[self.HDF5_GROUP_FILENAME] = self._filename
        h5py_group["pickled_type"] = pickle.dumps(type(self), 0)

        # HACK: can this be done more elegantly?
        with tempfile.TemporaryFile() as f:
            self._tiktorch_net.serialize(f)
            f.seek(0)
            h5py_group["classifier"] = numpy.void(f.read())
Developer: ilastik | Project: lazyflow | Lines: 10 | Source: tiktorchLazyflowClassifier.py


Example 8: VideoToStringArray

def VideoToStringArray(video_array):
    """Converts a NCHW video array to a N length string array with
    JPEG encoded strings, to be able to store as h5 files.
    """
    nframes = video_array.shape[0]
    frames = np.split(np.transpose(video_array, (0, 2, 3, 1)), nframes, axis=0)
    # np.void from http://docs.h5py.org/en/latest/strings.html
    frames = np.array([np.void(cv2.imencode(
        '.jpg', frame[0])[1].tostring()) for frame in frames])
    return frames
Developer: TPNguyen | Project: DetectAndTrack | Lines: 10 | Source: video_io.py
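
A hypothetical inverse (not part of the DetectAndTrack source; the name StringArrayToVideo is made up) would decode the JPEG strings back into an NCHW array, completing the round trip that motivates storing np.void blobs in h5:

import cv2
import numpy as np

def StringArrayToVideo(string_array):
    """Decode a length-N array of np.void JPEG blobs back to NCHW."""
    # Each element wraps one JPEG-encoded frame; decode it to HWC BGR.
    frames = [cv2.imdecode(np.frombuffer(s.tobytes(), np.uint8),
                           cv2.IMREAD_COLOR) for s in string_array]
    # Stack to NHWC, then transpose back to NCHW.
    return np.transpose(np.stack(frames), (0, 3, 1, 2))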


Example 9: save_dataset_as_hdf5

def save_dataset_as_hdf5(dataset, filename=None, variant=None):
    """
    Method to write simple datasets to an HDF5 file.

    :param dataset: The dataset to be stored as a dictionary of tuples.
        Each entry is one usage and contains (input_data, targets)
    :type dataset: dict[unicode, (numpy.ndarray, pylstm.targets.Targets)]

    :param filename: Filename/path of the file that should be written.
        Will overwrite if it already exists. Can be None if variant is given.
    :type filename: unicode

    :param variant: hdf5 group object the dataset will be saved to instead of
        writing it to a new file. Either this or filename has to be set.

    :rtype: None
    """
    hdffile = None
    if variant is None:
        assert filename is not None
        import h5py
        hdffile = h5py.File(filename, "w")
        variant = hdffile

    if 'description' in dataset:
        variant.attrs['description'] = dataset['description']
    for usage in ['training', 'validation', 'test']:
        if usage not in dataset:
            continue
        input_data, targets = dataset[usage]
        grp = variant.create_group(usage)

        grp.create_dataset('input_data', data=input_data,
                           chunks=get_chunksize(input_data),
                           compression="gzip")

        if targets.is_labeling():
            targets_encoded = np.void(cPickle.dumps(targets.data))
            targets_ds = grp.create_dataset('targets',
                                            data=targets_encoded,
                                            dtype=targets_encoded.dtype)
        else:
            targets_ds = grp.create_dataset(
                'targets',
                data=targets.data,
                chunks=get_chunksize(targets.data),
                compression="gzip"
            )
        targets_ds.attrs.create('targets_type', str(targets.targets_type[0]))
        targets_ds.attrs.create('binarize_to', targets.binarize_to or 0)
        if targets.mask is not None:
            grp.create_dataset('mask', data=targets.mask, dtype='u1')

    if hdffile is not None:
        hdffile.close()
Developer: Qwlouse | Project: pylstm | Lines: 55 | Source: loader.py


Example 10: write_hdf5

    def write_hdf5(self, filename, dataset_name=None, info=None):
        r"""Writes ImageArray to hdf5 file.

        Parameters
        ----------
        filename: string
            The filename to create and write a dataset to

        dataset_name: string
            The name of the dataset to create in the file.

        info: dictionary
            A dictionary of supplementary info to write to append as attributes
            to the dataset.

        Examples
        --------
        >>> a = YTArray([1,2,3], 'cm')

        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}

        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
        ...              info=myinfo)

        """
        import h5py
        from yt.extern.six.moves import cPickle as pickle

        if info is None:
            info = {}

        info["units"] = str(self.units)
        info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))

        if dataset_name is None:
            dataset_name = "array_data"

        f = h5py.File(filename)
        if dataset_name in f.keys():
            d = f[dataset_name]
            # Overwrite without deleting if we can get away with it.
            if d.shape == self.shape and d.dtype == self.dtype:
                d[:] = self
                for k in d.attrs.keys():
                    del d.attrs[k]
            else:
                del f[dataset_name]
                d = f.create_dataset(dataset_name, data=self)
        else:
            d = f.create_dataset(dataset_name, data=self)

        for k, v in info.items():
            d.attrs[k] = v
        f.close()
Developer: danielgrassinger | Project: yt_new_frontend | Lines: 54 | Source: yt_array.py


Example 11: metadata

    def metadata(self, value):
        try:
            del self.metadata
        except KeyError:
            pass

        dump = pickle.dumps(value)

        for i, start in enumerate(range(0, len(dump), MAX_ATTRIBUTE_SIZE)):
            self._group.attrs['_metadata{}'.format(i)] = np.void(
                dump[start : start + MAX_ATTRIBUTE_SIZE])
        self._group.attrs['_metadata_num'] = i + 1
Developer: arvidfm | Project: masters-thesis | Lines: 12 | Source: dataset.py
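
A matching getter (a hypothetical sketch; only the _metadata{i} and _metadata_num attribute names come from the setter above) would reassemble the chunks in order and unpickle them:

import pickle

def read_metadata(group):
    # Each attribute holds an np.void chunk; join their bytes and unpickle.
    num = group.attrs['_metadata_num']
    dump = b''.join(group.attrs['_metadata{}'.format(i)].tobytes()
                    for i in range(num))
    return pickle.loads(dump)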


Example 12: write_to

    def write_to(self, group, append=False):
        """Writes the properties to a `group`, or append it"""
        data = self.data
        if append is True:
            try:
                # concatenate original and new properties in a single list
                original = read_properties(group)
                data = original + data
            except EOFError:
                pass  # no former data to append on

        # h5py does not support embedded NULLs in strings ('\x00')
        data = pickle.dumps(data).replace(b'\x00', b'__NULL__')
        group['properties'][...] = np.void(data)
Developer: bootphon | Project: h5features | Lines: 14 | Source: properties.py
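
The read path has to undo the NULL escaping before unpickling. read_properties is referenced but not shown in this excerpt, so the following is only an assumed sketch of that inverse:

import pickle

def read_properties_sketch(group):
    # Recover the escaped pickle bytes from the opaque dataset,
    # restore the embedded NULLs, then unpickle.
    data = group['properties'][()].tobytes()
    return pickle.loads(data.replace(b'__NULL__', b'\x00'))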


Example 13: save

 def save(self, hdf5_handle):
     g = hdf5_handle
     # Class settings
     g.attrs.update(self.settings)
     # Class attributes
     h = g.create_group("class")
     h.attrs["label"] = self.label
     if self.settings["store_cxx_serial"]:
         if self.verbose: self.log << "[h5] Writing cxx serial" << self.log.endl
         # Prune pid data if not required to compute gradients
         prune_pid_data = False if self.options['spectrum.gradients'] else True
         cxx_serial = self.spectrum.saves(prune_pid_data)
         h = g.create_dataset("cxx_serial", data=np.void(cxx_serial))
     if self.settings["store_cmap"]:
         if self.verbose: self.log << "[h5] Writing coefficient map" << self.log.endl
         h = g.create_group("cmap")
         for idx, cmap in enumerate(self.cmap):
             hh = h.create_group('%d' % idx)
             for key in cmap:
                 hh.create_dataset(key, data=cmap[key], compression='gzip')
     if self.settings["store_gcmap"]:
         if self.verbose: self.log << "[h5] Writing global coefficient map" << self.log.endl
         h = g.create_group("gcmap")
         for idx, gcmap in enumerate(self.gcmap):
             hh = h.create_group('%d' % idx)
             for key in gcmap:
                 hh.create_dataset(key, data=gcmap[key], compression='gzip')
     if self.settings["store_sdmap"]:
         if self.verbose: self.log << "[h5] Writing descriptor map" << self.log.endl
         h = g.create_group('sdmap')
         for idx, sdmap in enumerate(self.sdmap):
             hh = h.create_group('%d' % idx)
             for key in sdmap:
                 hh.create_dataset(key, data=sdmap[key], compression='gzip')
     if self.settings["store_gsdmap"]:
         if self.verbose: self.log << "[h5] Writing global descriptor map" << self.log.endl
         h = g.create_group('gsdmap')
         for idx, gsdmap in enumerate(self.gsdmap):
             hh = h.create_group('%d' % idx)
             for key in gsdmap:
                 hh.create_dataset(key, data=gsdmap[key], compression='gzip')
     if self.settings["store_sd"]:
         if self.verbose: self.log << "[h5] Writing descriptor matrix" << self.log.endl
         g.create_dataset('sd', data=self.sd, compression='gzip')
     if self.settings["store_gsd"]:
         if self.verbose: self.log << "[h5] Writing global descriptor matrix" << self.log.endl
         g.create_dataset('gsd', data=self.gsd, compression='gzip')
     return self
Developer: capoe | Project: soapxx | Lines: 48 | Source: run.py


Example 14: _create_data

    def _create_data(self):
        """Initialize hold data by merging all headers of each frames.
        """
        headers = []
        types = set([])
        for fabio_frame in self.__fabio_reader.iter_frames():
            header = fabio_frame.header

            data = []
            for key, value in header.items():
                data.append("%s: %s" % (str(key), str(value)))

            data = "\n".join(data)
            try:
                line = data.encode("ascii")
                types.add(numpy.string_)
            except UnicodeEncodeError:
                try:
                    line = data.encode("utf-8")
                    types.add(numpy.unicode_)
                except UnicodeEncodeError:
                    # Fall back to void
                    line = numpy.void(data)
                    types.add(numpy.void)

            headers.append(line)

        if numpy.void in types:
            dtype = numpy.void
        elif numpy.unicode_ in types:
            dtype = numpy.unicode_
        else:
            dtype = numpy.string_

        if dtype == numpy.unicode_ and h5py is not None:
            # h5py only support vlen unicode
            dtype = h5py.special_dtype(vlen=six.text_type)

        return numpy.array(headers, dtype=dtype)
Developer: vallsv | Project: silx | Lines: 39 | Source: fabioh5.py


Example 15: test_void_scalar_recursion

 def test_void_scalar_recursion(self):
     # gh-9345
     repr(np.void(b'test'))  # RecursionError ?
Developer: Juanlu001 | Project: numpy | Lines: 3 | Source: test_arrayprint.py


Example 16: save_bytes

def save_bytes(s, parent, name, _):
    parent.create_dataset(name, data=np.void(s))
Developer: menpo | Project: h5it | Lines: 2 | Source: base.py


Example 17: create_hdf5_types

def create_hdf5_types(group):
    print("- Creating HDF types...")

    main_group = group.create_group("HDF5")

    # H5T_INTEGER

    int_data = numpy.random.randint(-100, 100, size=10 * 4 * 4 * 4)
    uint_data = numpy.random.randint(0, 100, size=10 * 4 * 4 * 4)

    group = main_group.create_group("integer_little_endian")
    for size in (1, 2, 4, 8):
        store_subdimensions(group, int_data, '<i' + str(size),
                            prefix='int' + str(size * 8))
        store_subdimensions(group, uint_data, '<u' + str(size),
                            prefix='uint' + str(size * 8))

    group = main_group.create_group("integer_big_endian")
    for size in (1, 2, 4, 8):
        store_subdimensions(group, int_data, '>i' + str(size),
                            prefix='int' + str(size * 8))
        store_subdimensions(group, uint_data, '>u' + str(size),
                            prefix='uint' + str(size * 8))

    # H5T_FLOAT

    float_data = numpy.random.rand(10 * 4 * 4 * 4)
    group = main_group.create_group("float_little_endian")

    for size in (2, 4, 8):
        store_subdimensions(group, float_data, '<f' + str(size),
                            prefix='float' + str(size * 8))

    group = main_group.create_group("float_big_endian")

    for size in (2, 4, 8):
        store_subdimensions(group, float_data, '>f' + str(size),
                            prefix='float' + str(size * 8))

    # H5T_TIME

    main_group.create_group("time")

    # H5T_STRING

    main_group["text/ascii"] = b"abcd"
    main_group["text/bad_ascii"] = b"ab\xEFcd\xFF"
    main_group["text/utf8"] = u"me \u2661 tu"

    # H5T_BITFIELD

    main_group.create_group("bitfield")

    # H5T_OPAQUE

    group = main_group.create_group("opaque")

    main_group["opaque/ascii"] = numpy.void(b"abcd")
    main_group["opaque/utf8"] = numpy.void(u"i \u2661 my mother".encode("utf-8"))
    main_group["opaque/thing"] = numpy.void(b"\x10\x20\x30\x40\xF0")
    main_group["opaque/big_thing"] = numpy.void(b"\x10\x20\x30\x40\xF0" * 100000)

    data = numpy.void(b"\x10\x20\x30\x40\xFF" * 20)
    data = numpy.array([data] * 10 * 4 * 4 * 4, numpy.void)
    store_subdimensions(group, data, "void")

    # H5T_COMPOUND

    a = numpy.array([(1, 2., 'Hello'), (2, 3., "World")],
                    dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])

    b = numpy.zeros(3, dtype='3int8, float32, (2,3)float64')

    c = numpy.zeros(3, dtype=('i4', [('r', 'u1'), ('g', 'u1'), ('b', 'u1'), ('a', 'u1')]))

    d = numpy.zeros(3, dtype=[('x', 'f4'), ('y', numpy.float32), ('value', 'f4', (2, 2))])

    e = numpy.zeros(3, dtype={'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})

    f = numpy.array([(1.5, 2.5, (1.0, 2.0)), (3., 4., (4., 5.)), (1., 3., (2., 6.))],
                    dtype=[('x', 'f4'), ('y', numpy.float32), ('value', 'f4', (2, 2))])

    main_group["compound/numpy_example_a"] = a
    main_group["compound/numpy_example_b"] = b
    main_group["compound/numpy_example_c"] = c
    main_group["compound/numpy_example_d"] = d
    main_group["compound/numpy_example_e"] = e
    main_group["compound/numpy_example_f"] = f

    dt = numpy.dtype([('start', numpy.uint32), ('stop', numpy.uint32)])
    vlen_dt = h5py.special_dtype(vlen=dt)
    data = numpy.array([[(1, 2), (2, 3)], [(3, 5), (5, 8), (8, 9)]], vlen_dt)
    dataset = main_group.create_dataset("compound/vlen", data.shape, data.dtype)
    for i, row in enumerate(data):
        dataset[i] = row

    # numpy complex is a H5T_COMPOUND

    real_data = numpy.random.rand(10 * 4 * 4 * 4)
    imaginary_data = numpy.random.rand(10 * 4 * 4 * 4)
#......... some code omitted here .........
Developer: dnaudet | Project: silx | Lines: 101 | Source: create_h5_sample.py


Example 18: pick

def pick(obj):
    """create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
    """
    return np.void(pickle.dumps(obj, 0))
Developer: jrleja | Project: bsfh | Lines: 4 | Source: write_results.py
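
A usage sketch of pick (assumed, not from the bsfh source; the file and key names are made up) showing the full round trip through an HDF5 file:

import pickle
import h5py

with h5py.File('results.h5', 'w') as f:
    f['model'] = pick({'theta': [0.1, 0.2]})  # stored as an opaque scalar

with h5py.File('results.h5', 'r') as f:
    obj = pickle.loads(f['model'][()].tobytes())
    assert obj == {'theta': [0.1, 0.2]}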


Example 19: _nonempty_index

        else:
            data = _nonempty_index(idx.categories)
            cats = None
        return pd.CategoricalIndex(data, categories=cats,
                                   ordered=idx.ordered, name=idx.name)
    elif typ is pd.MultiIndex:
        levels = [_nonempty_index(i) for i in idx.levels]
        labels = [[0, 0] for i in idx.levels]
        return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
    raise TypeError("Don't know how to handle index of "
                    "type {0}".format(type(idx).__name__))


_simple_fake_mapping = {
    'b': np.bool_(True),
    'V': np.void(b' '),
    'M': np.datetime64('1970-01-01'),
    'm': np.timedelta64(1),
    'S': np.str_('foo'),
    'a': np.str_('foo'),
    'U': np.unicode_('foo'),
    'O': 'foo'
}


def _scalar_from_dtype(dtype):
    if dtype.kind in ('i', 'f', 'u'):
        return dtype.type(1)
    elif dtype.kind == 'c':
        return dtype.type(complex(1, 0))
    elif dtype.kind in _simple_fake_mapping:
Developer: fortizc | Project: dask | Lines: 31 | Source: utils.py


Example 20: analyze

 def analyze(self):
     plt.plot([1, 2, 0, 3, 4])
     f = io.BytesIO()
     plt.savefig(f, format="PNG")
     f.seek(0)
     self.set_dataset("thumbnail", np.void(f.read()))
Developer: JQIamo | Project: artiq | Lines: 6 | Source: thumbnail.py



Note: the numpy.void function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.

