• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python xarray.Dataset类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中xarray.Dataset的典型用法代码示例。如果您正苦于以下问题:Python Dataset类的具体用法?Python Dataset怎么用?Python Dataset使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了Dataset类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_open_and_do_math

 def test_open_and_do_math(self):
     """Write a Dataset to netCDF, reopen it lazily with open_mfdataset,
     and check that arithmetic on the reopened data matches the original.
     """
     source = Dataset({'foo': ('x', np.random.randn(10))})
     with create_tmp_file() as tmp:
         source.to_netcdf(tmp)
         with open_mfdataset(tmp) as reopened:
             result = 1.0 * reopened
             self.assertDatasetAllClose(source, result)
开发者ID:ashang,项目名称:xarray,代码行数:7,代码来源:test_backends.py


示例2: radec2azel

def radec2azel(scale: xarray.Dataset,
               latlon: Tuple[float, float], time: datetime=None) -> xarray.Dataset:
    """Convert the right-ascension/declination grid in *scale* to
    azimuth/elevation for an observer at *latlon*.

    :param scale: Dataset with 'ra' and 'dec' variables and a ``filename``
        attribute pointing at the source FITS file (used to look up the
        frame time when *time* is not given)
    :param latlon: (latitude, longitude) of the observing site
    :param time: observation time; ``None`` reads it from the FITS header,
        a ``datetime`` is used as-is, a number is treated as a Unix
        timestamp (UT1), and anything else is parsed as a date string
    :return: *scale* augmented with 'az'/'el' variables and
        lat/lon/time attributes, or ``None`` on invalid input
    """
    if latlon is None or not isinstance(scale, xarray.Dataset):
        return None

    if time is None:
        # No time supplied: try to recover the frame time from the FITS file.
        with fits.open(scale.filename, mode='readonly') as f:
            try:
                t = f[0].header['FRAME']  # TODO this only works from Solis?
            except KeyError:
                logging.error('no time given in file or manually, cannot compute az/el')
                return None
        time = parse(t)
        logging.info('using FITS header for time')
    elif isinstance(time, datetime):
        pass
    elif isinstance(time, (float, int)):  # assume UT1_Unix
        time = datetime.utcfromtimestamp(time)
    else:  # user override of frame time
        time = parse(time)

    print('image time:', time)
# %% knowing camera location, time, and sky coordinates observed, convert to az/el for each pixel
    az, el = pymap3d.radec2azel(scale['ra'], scale['dec'], latlon[0], latlon[1], time)
# %% collect output
    scale['az'] = (('y', 'x'), az)
    scale['el'] = (('y', 'x'), el)
    scale.attrs['lat'] = latlon[0]
    scale.attrs['lon'] = latlon[1]
    scale.attrs['time'] = time

    return scale
开发者ID:scienceopen,项目名称:astrometry_azel,代码行数:33,代码来源:base.py


示例3: test_concat_multiindex

 def test_concat_multiindex(self):
     """Concatenating two slices of a MultiIndex-indexed Dataset along
     'x' must rebuild the original and keep the MultiIndex intact.
     """
     midx = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
     whole = Dataset({'x': midx})
     pieces = [whole.isel(x=slice(2)), whole.isel(x=slice(2, None))]
     rebuilt = concat(pieces, 'x')
     assert whole.equals(rebuilt)
     assert isinstance(rebuilt.x.to_index(), pd.MultiIndex)
开发者ID:CCI-Tools,项目名称:xarray,代码行数:7,代码来源:test_combine.py


示例4: test_roundtrip_strings_with_fill_value

    def test_roundtrip_strings_with_fill_value(self):
        """Round-trip object-dtype string arrays containing NaN with an
        explicit _FillValue and compare against per-backend expectations.

        Backends differ in how they restore the fill-value slot, so the
        expected dataset is patched per store class before comparing.
        """
        values = np.array(['ab', 'cdef', np.nan], dtype=object)
        encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
        original = Dataset({'x': ('t', values, {}, encoding)})
        expected = original.copy(deep=True)
        # With a char-array ('S1') encoding the strings come back as bytes.
        expected['x'][:2] = values[:2].astype('S')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)

        # Second pass: an empty ('\x00') fill value.
        original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
        if not isinstance(self, Only32BitTypes):
            # these stores can save unicode strings
            expected = original.copy(deep=True)
        if isinstance(self, BaseNetCDF4Test):
            # netCDF4 can't keep track of an empty _FillValue for VLEN
            # variables
            expected['x'][-1] = ''
        elif (isinstance(self, (NetCDF3ViaNetCDF4DataTest,
                                NetCDF4ClassicViaNetCDF4DataTest)) or
              (has_netCDF4 and type(self) is GenericNetCDFDataTest)):
            # netCDF4 can't keep track of an empty _FillValue for nc3, either:
            # https://github.com/Unidata/netcdf4-python/issues/273
            expected['x'][-1] = np.string_('')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)
开发者ID:ashang,项目名称:xarray,代码行数:25,代码来源:test_backends.py


示例5: _preprocess_dataset

 def _preprocess_dataset(self, ds: Dataset):
     """Promote selected data variables to coordinate variables.

     :param ds: Dataset to preprocess
     :return: the Dataset with every variable named in
         ``EXTRA_COORDS_VAR_NAMES`` moved from ``data_vars`` to ``coords``
     """
     # ``set_coords(..., inplace=True)`` was deprecated and later removed
     # in xarray; rebinding the returned Dataset is the supported,
     # equivalent form for callers of this method.
     for var_name in EXTRA_COORDS_VAR_NAMES:
         if var_name in ds.data_vars:
             ds = ds.set_coords(var_name)
     return ds
开发者ID:CAB-LAB,项目名称:cablab-core,代码行数:7,代码来源:cube_access.py


示例6: test_coordinates_encoding

    def test_coordinates_encoding(self):
        """Round-trip datasets with auxiliary lat/lon coordinates and check
        where the CF 'coordinates' attribute is written on disk.

        With data variables present it must appear on each data variable;
        once the data variables are dropped it moves to the global attrs.
        The order of names inside the attribute is not guaranteed, hence
        the equals_latlon helper.
        """
        def equals_latlon(obj):
            # 'coordinates' may list lat/lon in either order
            return obj == 'lat lon' or obj == 'lon lat'

        original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
                           {'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(actual, original)
        with create_tmp_file() as tmp_file:
            original.to_netcdf(tmp_file)
            # decode_coords=False exposes the raw on-disk attributes.
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
                self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
                self.assertNotIn('coordinates', ds.attrs)
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)

        # Without any data variables the 'coordinates' attribute is global.
        modified = original.drop(['temp', 'precip'])
        with self.roundtrip(modified) as actual:
            self.assertDatasetIdentical(actual, modified)
        with create_tmp_file() as tmp_file:
            modified.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds.attrs['coordinates']))
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
开发者ID:ashang,项目名称:xarray,代码行数:26,代码来源:test_backends.py


示例7: adjust_temporal_attrs_impl

def adjust_temporal_attrs_impl(ds: xr.Dataset) -> xr.Dataset:
    """
    Adjust the global temporal attributes of the dataset by introspecting
    the dataset and updating the relevant attributes accordingly.

    Attributes that can be determined but are missing from the dataset are
    added; attributes whose determined value is ``None`` are removed.

    For more information on suggested global attributes see
    `Attribute Convention for Data Discovery
    <http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery>`_

    :param ds: Dataset to adjust
    :return: Adjusted dataset
    """
    determined = _get_temporal_cf_attrs_from_var(ds)
    if not determined:
        return ds

    # Work on a shallow copy so the caller's dataset stays untouched.
    ds = ds.copy()
    for key, value in determined.items():
        if value is None:
            ds.attrs.pop(key, None)
        else:
            ds.attrs[key] = value

    return ds
开发者ID:CCI-Tools,项目名称:ect-core,代码行数:29,代码来源:opimpl.py


示例8: test_roundtrip_object_dtype

 def test_roundtrip_object_dtype(self):
     """Round-trip object-dtype arrays (floats, strings, NaNs, a scalar)
     and compare against per-backend expectations.

     netCDF3-style stores return strings as bytes, and stores differ in
     how they represent NaN inside string arrays, so the comparison has a
     fallback path.
     """
     floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
     floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
     letters = np.array(['ab', 'cdef', 'g'], dtype=object)
     letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
     all_nans = np.array([np.nan, np.nan], dtype=object)
     original = Dataset({'floats': ('a', floats),
                         'floats_nans': ('a', floats_nans),
                         'letters': ('b', letters),
                         'letters_nans': ('b', letters_nans),
                         'all_nans': ('c', all_nans),
                         'nan': ([], np.nan)})
     expected = original.copy(deep=True)
     if isinstance(self, Only32BitTypes):
         # for netCDF3 tests, expect the results to come back as characters
         expected['letters_nans'] = expected['letters_nans'].astype('S')
         expected['letters'] = expected['letters'].astype('S')
     with self.roundtrip(original) as actual:
         try:
             self.assertDatasetIdentical(expected, actual)
         except AssertionError:
             # Most stores use '' for nans in strings, but some don't
             # first try the ideal case (where the store returns exactly)
             # the original Dataset), then try a more realistic case.
             # ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
             # all end up using this case.
             expected['letters_nans'][-1] = ''
             self.assertDatasetIdentical(expected, actual)
开发者ID:ashang,项目名称:xarray,代码行数:28,代码来源:test_backends.py


示例9: test_to_dask_dataframe

    def test_to_dask_dataframe(self):
        """Convert a Dataset mixing dask and numpy variables to a dask
        DataFrame, both with 't' as the index and with a default index.
        """
        # Test conversion of Datasets to dask DataFrames
        x = da.from_array(np.random.randn(10), chunks=4)
        y = np.arange(10, dtype='uint8')
        t = list('abcdefghij')

        ds = Dataset(OrderedDict([('a', ('t', x)),
                                  ('b', ('t', y)),
                                  ('t', ('t', t))]))

        # Reference frame built directly with pandas.
        expected_pd = pd.DataFrame({'a': x,
                                    'b': y},
                                   index=pd.Index(t, name='t'))

        # test if 1-D index is correctly set up
        expected = dd.from_pandas(expected_pd, chunksize=4)
        actual = ds.to_dask_dataframe(set_index=True)
        # test if we have dask dataframes
        assert isinstance(actual, dd.DataFrame)

        # use the .equals from pandas to check dataframes are equivalent
        assert_frame_equal(expected.compute(), actual.compute())

        # test if no index is given
        expected = dd.from_pandas(expected_pd.reset_index(drop=False),
                                  chunksize=4)

        actual = ds.to_dask_dataframe(set_index=False)

        assert isinstance(actual, dd.DataFrame)
        assert_frame_equal(expected.compute(), actual.compute())
开发者ID:jcmgray,项目名称:xarray,代码行数:31,代码来源:test_dask.py


示例10: state_to_xarray

def state_to_xarray(state):
    '''Convert a dictionary of climlab.Field objects to xarray.Dataset

    Input: dictionary of climlab.Field objects
    (e.g. process.state or process.diagnostics dictionary)

    Output: xarray.Dataset object with all spatial axes,
    including 'bounds' axes indicating cell boundaries in each spatial dimension.

    Any items in the dictionary that are not instances of climlab.Field
    are ignored (a warning is emitted for each one).'''
    from climlab.domain.field import Field

    ds = Dataset()
    for name, field in state.items():
        if not isinstance(field, Field):
            warnings.warn('{} excluded from Dataset because it is not a Field variable.'.format(name))
            continue
        ds[name] = Field_to_xarray(field)
        dom = field.domain
        for axname, ax in dom.axes.items():
            # Add a coordinate holding the cell boundaries of this axis.
            bounds_name = axname + '_bounds'
            ds.coords[bounds_name] = DataArray(ax.bounds, dims=[bounds_name],
                                               coords={bounds_name: ax.bounds})
            # Some axes carry no 'units' metadata; skip the attribute then.
            # The previous bare ``except:`` swallowed *every* error here
            # (including KeyboardInterrupt); catch only the expected one.
            try:
                ds[bounds_name].attrs['units'] = ax.units
            except AttributeError:
                pass
    return ds
开发者ID:brian-rose,项目名称:climlab,代码行数:29,代码来源:xarray.py


示例11: test_dask_layers_and_dependencies

def test_dask_layers_and_dependencies():
    """A delayed wrapper around a chunked Dataset (and around one of its
    variables) must carry at least the Dataset's own graph dependencies."""
    chunked = Dataset({'foo': ('x', range(5)),
                       'bar': ('x', range(5))}).chunk()

    delayed_ds = dask.delayed(chunked)
    base_deps = set(chunked.__dask_graph__().dependencies)
    assert set(delayed_ds.__dask_graph__().dependencies) >= base_deps
    assert set(delayed_ds.foo.__dask_graph__().dependencies) >= base_deps
开发者ID:benbovy,项目名称:xarray,代码行数:9,代码来源:test_dask.py


示例12: test_save_mfdataset_roundtrip

 def test_save_mfdataset_roundtrip(self):
     """save_mfdataset over two files followed by open_mfdataset must
     reproduce the original Dataset exactly."""
     source = Dataset({'foo': ('x', np.random.randn(10))})
     halves = [source.isel(x=slice(5)),
               source.isel(x=slice(5, 10))]
     with create_tmp_file() as tmp1:
         with create_tmp_file() as tmp2:
             save_mfdataset(halves, [tmp1, tmp2])
             with open_mfdataset([tmp1, tmp2]) as loaded:
                 self.assertDatasetIdentical(loaded, source)
开发者ID:ashang,项目名称:xarray,代码行数:9,代码来源:test_backends.py


示例13: adjust_spatial_attrs_impl

def adjust_spatial_attrs_impl(ds: xr.Dataset, allow_point: bool) -> xr.Dataset:
    """
    Adjust the global spatial attributes of the dataset by doing some
    introspection of the dataset and adjusting the appropriate attributes
    accordingly.

    In case the determined attributes do not exist in the dataset, these will
    be added.

    For more information on suggested global attributes see
    `Attribute Convention for Data Discovery
    <http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery>`_

    :param ds: Dataset to adjust
    :param allow_point: Whether to accept single point cells
    :return: Adjusted dataset
    """

    # Copy-on-write: only take a (shallow) copy of ``ds`` the first time we
    # actually have something to change, so an unchanged input is returned
    # as-is.
    copied = False

    for dim in ('lon', 'lat'):
        geo_spatial_attrs = _get_geo_spatial_cf_attrs_from_var(ds, dim, allow_point=allow_point)
        if geo_spatial_attrs:
            # Copy any new attributes into the shallow Dataset copy
            for key in geo_spatial_attrs:
                if geo_spatial_attrs[key] is not None:
                    if not copied:
                        ds = ds.copy()
                        copied = True
                    ds.attrs[key] = geo_spatial_attrs[key]

    # A bounding polygon can only be written when all four extremes are known.
    lon_min = ds.attrs.get('geospatial_lon_min')
    lat_min = ds.attrs.get('geospatial_lat_min')
    lon_max = ds.attrs.get('geospatial_lon_max')
    lat_max = ds.attrs.get('geospatial_lat_max')

    if lon_min is not None and lat_min is not None and lon_max is not None and lat_max is not None:

        if not copied:
            ds = ds.copy()

        # WKT polygon: counter-clockwise ring, closed by repeating the start.
        ds.attrs['geospatial_bounds'] = 'POLYGON(({} {}, {} {}, {} {}, {} {}, {} {}))'. \
            format(lon_min, lat_min, lon_min, lat_max, lon_max, lat_max, lon_max, lat_min, lon_min, lat_min)

        # Determination of the following attributes from introspection in a general
        # way is ambiguous, hence it is safer to drop them than to risk preserving
        # out of date attributes.
        drop = ['geospatial_bounds_crs', 'geospatial_bounds_vertical_crs',
                'geospatial_vertical_min', 'geospatial_vertical_max',
                'geospatial_vertical_positive', 'geospatial_vertical_units',
                'geospatial_vertical_resolution']

        for key in drop:
            ds.attrs.pop(key, None)

    return ds
开发者ID:CCI-Tools,项目名称:ect-core,代码行数:56,代码来源:opimpl.py


示例14: test_concat_coords

 def test_concat_coords(self):
     """concat with coords='different'/'all'/['c'] must merge the scalar
     coordinate 'c'; 'minimal' and [] must raise because 'c' differs."""
     base = Dataset({"foo": ("x", np.random.randn(10))})
     expected = base.assign_coords(c=("x", [0] * 5 + [1] * 5))
     pieces = [base.isel(x=slice(5)).assign_coords(c=0),
               base.isel(x=slice(5, None)).assign_coords(c=1)]
     for coords in ["different", "all", ["c"]]:
         merged = concat(pieces, dim="x", coords=coords)
         self.assertDatasetIdentical(expected, merged)
     for coords in ["minimal", []]:
         with self.assertRaisesRegexp(ValueError, "not equal across"):
             concat(pieces, dim="x", coords=coords)
开发者ID:spencerahill,项目名称:xarray,代码行数:10,代码来源:test_combine.py


示例15: test_concat_encoding

 def test_concat_encoding(self):
     """Regression test for GH1297: concat must propagate the encoding of
     its first input, for both DataArrays and Datasets."""
     dset = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
                     'bar': (['x', 'y'], np.random.random((2, 3)))},
                    {'x': [0, 1]})
     foo_var = dset['foo']
     foo_var.encoding = {"complevel": 5}
     dset.encoding = {"unlimited_dims": 'x'}
     for obj in (foo_var, dset):
         assert concat([obj, obj], dim="x").encoding == obj.encoding
开发者ID:benbovy,项目名称:xarray,代码行数:10,代码来源:test_combine.py


示例16: test_weakrefs

    def test_weakrefs(self):
        """Deleting the on-disk dataset object after deriving a renamed
        view must not invalidate the derived result."""
        source = Dataset({'foo': ('x', np.arange(5.0))})
        renamed = source.rename({'foo': 'bar', 'x': 'y'})

        with create_tmp_file() as tmp_file:
            source.to_netcdf(tmp_file, engine='scipy')
            on_disk = open_dataset(tmp_file, engine='pynio')
            derived = on_disk.rename({'foo': 'bar', 'x': 'y'})
            del on_disk  # trigger garbage collection
            self.assertDatasetIdentical(derived, renamed)
开发者ID:ashang,项目名称:xarray,代码行数:10,代码来源:test_backends.py


示例17: test_variable_order

    def test_variable_order(self):
        """Round-tripping must preserve the insertion order of variables
        and coordinates."""
        # doesn't work with scipy or h5py :(
        dset = Dataset()
        for name, value in (('a', 1), ('z', 2), ('b', 3)):
            dset[name] = value
        dset.coords['c'] = 4

        with self.roundtrip(dset) as loaded:
            self.assertEqual(list(dset), list(loaded))
开发者ID:ashang,项目名称:xarray,代码行数:10,代码来源:test_backends.py


示例18: test_dataset_pickle

 def test_dataset_pickle(self):
     """Pickling a dask-backed Dataset must neither compute the data nor
     load it into memory, on either the source or the unpickled copy.

     ``kernel_call_count`` counts evaluations of the underlying dask
     kernel; it must not grow past the single call made by ``compute()``.
     """
     ds1 = Dataset({'a': DataArray(build_dask_array())})
     ds1.compute()
     self.assertFalse(ds1['a']._in_memory)
     # ``assertEquals`` is a deprecated alias removed in Python 3.12;
     # use ``assertEqual``.
     self.assertEqual(kernel_call_count, 1)
     ds2 = pickle.loads(pickle.dumps(ds1))
     # Pickling must not have triggered any additional kernel evaluation.
     self.assertEqual(kernel_call_count, 1)
     self.assertDatasetIdentical(ds1, ds2)
     self.assertFalse(ds1['a']._in_memory)
     self.assertFalse(ds2['a']._in_memory)
开发者ID:SixtyCapital,项目名称:xarray,代码行数:10,代码来源:test_dask.py


示例19: test_persist_Dataset

    def test_persist_Dataset(self):
        """persist() must return a Dataset with a collapsed task graph
        while leaving the original's graph untouched."""
        lazy = Dataset({'foo': ('x', range(5)),
                        'bar': ('x', range(5))}).chunk() + 1
        n_tasks = len(lazy.foo.data.dask)

        persisted = lazy.persist()

        assert len(persisted.foo.data.dask) == 1
        assert len(lazy.foo.data.dask) == n_tasks  # doesn't mutate in place
开发者ID:CCI-Tools,项目名称:xarray,代码行数:10,代码来源:test_dask.py


示例20: diff

def diff(ds: xr.Dataset,
         ds2: xr.Dataset,
         monitor: Monitor = Monitor.NONE) -> xr.Dataset:
    """
    Calculate the difference of two datasets (ds - ds2). This is done by
    matching variable names in the two datasets against each other and taking
    the difference of matching variables.

    If lat/lon/time extents differ between the datasets, the default behavior
    is to take the intersection of the datasets and run subtraction on that.
    However, broadcasting is possible. E.g. ds(lat/lon/time) - ds(lat/lon) is
    valid. In this case the subtrahend will be stretched to the size of
    ds(lat/lon/time) so that it can be subtracted. This also works if the
    subtrahend is a single time slice of arbitrary temporal position. In this
    case, the time dimension will be squeezed out leaving a lat/lon dataset.

    :param ds: The minuend dataset
    :param ds2: The subtrahend dataset
    :param monitor: a progress monitor.
    :return: The difference dataset
    """
    try:
        # Times do not intersect (empty difference of the time coordinates)
        # but both datasets have the same number of time steps:
        if 0 == len(ds.time - ds2.time) and \
                len(ds.time) == len(ds2.time):  # Times are the same length
            # If the datasets don't intersect in time dimension, a naive difference
            # would return empty data variables. Hence, the time coordinate has to
            # be dropped beforehand
            ds = ds.drop('time')
            ds2 = ds2.drop('time')
            return ds - ds2
    except AttributeError:
        # It is likely that the one operand is a lat/lon array that can be
        # broadcast against the other operand
        pass

    try:
        if 1 == len(ds2.time):
            # The subtrahend is a single time-slice -> squeeze 'time' dimension to
            # be able to broadcast is along minuend
            ds2 = ds2.squeeze('time', drop=True)
    except AttributeError:
        # Doesn't have a time dimension already
        pass
    except TypeError as e:
        if 'unsized object' in str(e):
            # The 'time' variable is a scalar
            pass
        else:
            raise TypeError(str(e))

    # NOTE: local name intentionally shadows this function's name; it is
    # only used to hold the result inside the monitor context.
    with monitor.observing("Subtract datasets"):
        diff = ds - ds2

    return diff
开发者ID:CCI-Tools,项目名称:ect-core,代码行数:55,代码来源:arithmetics.py



注:本文中的xarray.Dataset类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python xarray.Variable类代码示例发布时间:2022-05-26
下一篇:
Python xarray.DataArray类代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap