• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python xarray.open_mfdataset函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中xarray.open_mfdataset函数的典型用法代码示例。如果您正苦于以下问题:Python open_mfdataset函数的具体用法?Python open_mfdataset怎么用?Python open_mfdataset使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了open_mfdataset函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_subset_variables

    def test_subset_variables(self):
        """Check that subset_variables and the onlyvars argument both reduce
        the dataset to exactly the requested variables, and that the time
        offset is applied correctly."""
        fileName = str(self.datadir.join('example_jan.nc'))
        timestr = ['xtime_start', 'xtime_end']
        varList = ['time_avg_avgValueWithinOceanRegion_avgSurfaceTemperature']

        # first, test loading the whole data set and then calling
        # subset_variables explicitly
        ds = xr.open_mfdataset(
            fileName,
            preprocess=lambda x: mpas_xarray.preprocess_mpas(x,
                                                             timestr=timestr,
                                                             yearoffset=1850))
        ds = mpas_xarray.subset_variables(ds, varList)
        self.assertEqual(sorted(ds.data_vars.keys()), sorted(varList))
        self.assertEqual(pd.Timestamp(ds.Time.values[0]),
                         pd.Timestamp('1855-01-16 12:22:30'))

        # next, test the same with the onlyvars argument
        ds = xr.open_mfdataset(
            fileName,
            preprocess=lambda x: mpas_xarray.preprocess_mpas(x,
                                                             timestr=timestr,
                                                             onlyvars=varList,
                                                             yearoffset=1850))
        # BUGFIX: sort both sides -- in Python 3 dict_keys never compares
        # equal to a list, and key order is not guaranteed (this now matches
        # the equivalent check above)
        self.assertEqual(sorted(ds.data_vars.keys()), sorted(varList))
开发者ID:toddringler,项目名称:MPAS-Analysis,代码行数:25,代码来源:test_mpas_xarray.py


示例2: scaleVSpower

def scaleVSpower():
    """Scatter-plot mean LSTA wavelet power against wavelet scale.

    For every distinct, finite scale value present in the first 300 time
    steps of the scale maps, average the corresponding power-map pixels
    and plot the result against the scale.
    """
    power = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/power_maps/' \
                           'lsta_daily_power*.nc')

    scale = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/scale_maps/' \
                           'lsta_daily_scale*.nc')

    # distinct, finite scale values in the first 300 time steps
    unique_scales = np.unique(scale['LSTA'].values[0:300, :, :])
    unique_scales = unique_scales[np.isfinite(unique_scales)]

    power_arr = power['LSTA'][0:300]
    scale_arr = scale['LSTA'][0:300]

    # mean power over all pixels belonging to each scale value
    mean_power = []
    for s in unique_scales:
        print('Doing '+str(s))
        mean_power.append(
            np.nanmean(power_arr.where(scale_arr.values == s).values))

    fig = plt.figure()
    plt.scatter(unique_scales, mean_power)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:27,代码来源:surfaceScales_Powerdistribution.py


示例3: file_loop

def file_loop():
    """Histogram daily LSTA anomalies restricted to the scale-map domain.

    Loads the scale maps and the raw daily LSTA files, restricts the raw
    data to the lat/lon box and time steps of the scale maps, plots a
    histogram of the anomalies, then the 90th percentile of the anomaly
    within each scale bin.
    """
    lsta = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/scale_maps/' \
                           'lsta_daily_scale_*.nc')

    lsta_check = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/' \
                           'lsta_daily_*.nc')

    # restrict the raw data to the lat/lon box covered by the scale maps
    lsta_check = lsta_check.sel(
        lat=slice(lsta['lat'].values.min(), lsta['lat'].values.max()),
        lon=slice(lsta['lon'].values.min(), lsta['lon'].values.max()))

    # values <= -800 look like fill values -- mask them (TODO confirm)
    lsta_checks = lsta_check['LSTA'].where(lsta_check['LSTA'] > -800)
    # keep only time steps shared by both datasets
    lsta_checks = lsta_checks.where(lsta.time == lsta_checks.time)

    bins = np.arange(-20, 20, 2)
    f = plt.figure()
    plt.hist(lsta_checks.values[np.isfinite(lsta_checks.values)],
             bins=bins, edgecolor='k')

    bins = np.arange(-140, 141, 10)

    ll = []
    for i, b in enumerate(bins[0:-1]):
        b1 = bins[i + 1]
        # 90th percentile of the anomaly where the scale is in [b, b1)
        lmean = np.percentile(
            lsta_checks.where((lsta['LSTA'].values >= b) &
                              (lsta['LSTA'].values < b1)), 90)
        ll.append(lmean)

    # BUGFIX: removed a leftover pdb.set_trace() breakpoint here -- it
    # would halt any non-interactive run of this function
    f = plt.figure()
    plt.scatter(bins[1::], ll)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:34,代码来源:surfaceScales_distribution.py


示例4: main

def main(era_filesearch, cesm_base_filesearch, bias_output):
    """Compute the standard-deviation bias of CESM relative to ERA.

    Opens both datasets, takes the standard deviation over time,
    interpolates the ERA statistics onto the CESM levels (presumably a
    vertical interpolation via the time-mean geopotential height "z" --
    TODO confirm against z_interp_all_vars) and writes the difference
    (ERA - CESM) to *bias_output*.

    Parameters
    ----------
    era_filesearch : str
        Glob pattern for the ERA netCDF files.
    cesm_base_filesearch : str
        Glob pattern for the baseline CESM netCDF files.
    bias_output : str
        Path of the netCDF file the bias is written to.
    """
    print("opening data")
    era_data = xr.open_mfdataset(era_filesearch, concat_dim='time')
    base_cesm_data = xr.open_mfdataset(cesm_base_filesearch, concat_dim='time')

    print("loading data")
    era_data.load()
    base_cesm_data.load()

    # BUGFIX: the original message said "compute means" although .std()
    # is computed; locals renamed from emean/cmean accordingly
    print("compute standard deviations")
    era_std = era_data.std(dim="time")
    cesm_std = base_cesm_data.std(dim="time")

    print("creating data")
    interpolated_era = xr.zeros_like(cesm_std)
    print("loading data")
    interpolated_era.load()

    z_interp_all_vars(era_std, interpolated_era,
                      era_data["z"].mean(dim="time"),
                      base_cesm_data["z"].mean(dim="time"),
                      vars_to_correct)
    interpolated_era.to_netcdf("era_interpolated_std.nc")

    print("Computing Bias")
    bias = interpolated_era - cesm_std

    print("writing")
    bias.to_netcdf(bias_output)
开发者ID:gutmann,项目名称:scripted_sufferin_succotash,代码行数:27,代码来源:correct_forcing.py


示例5: test_deterministic_names

 def test_deterministic_names(self):
     """Opening the same file twice must produce identical dask graph
     names, each embedding the variable name and the source path."""
     with create_tmp_file() as tmp:
         create_test_data().to_netcdf(tmp)
         with open_mfdataset(tmp) as ds:
             original_names = {k: v.data.name for k, v in ds.items()}
         with open_mfdataset(tmp) as ds:
             repeat_names = {k: v.data.name for k, v in ds.items()}
         for var_name, dask_name in original_names.items():
             self.assertIn(var_name, dask_name)
             self.assertIn(tmp, dask_name)
         self.assertEqual(original_names, repeat_names)
开发者ID:ashang,项目名称:xarray,代码行数:12,代码来源:test_backends.py


示例6: read_nc_files

def read_nc_files(dir, bounds=None):
    """Open all reanalysis netCDF files found under *dir* as one dataset.

    Parameters
    ----------
    dir : str
        Directory searched via ``get_reanalysis_file_paths``.
    bounds : optional
        Spatial bounds forwarded to ``assert_bounds`` for each file.

    Returns
    -------
    xarray Dataset combining all files found.

    Raises
    ------
    IOError
        If no .nc files are found in the directory.
    """
    files = get_reanalysis_file_paths(dir)
    # BUGFIX/cleanup: the original `len > 1` and `len == 1` branches were
    # byte-identical, and the inner `rmheight` helper was dead code;
    # open_mfdataset handles one file or many, so a single call suffices
    if not files:
        raise IOError("There are no .nc files in that directory.")
    return xarray.open_mfdataset(
        files, preprocess=lambda d: assert_bounds(d, bounds))
开发者ID:tjvandal,项目名称:pydownscale,代码行数:13,代码来源:data.py


示例7: test_lock

 def test_lock(self):
     """A NETCDF3 file read through the default and netCDF4 backends must
     carry a Lock in its dask task; the scipy engine must not."""
     dataset = Dataset({'foo': ('x', np.random.randn(10))})
     with create_tmp_file() as tmp:
         dataset.to_netcdf(tmp, format='NETCDF3_CLASSIC')
         with open_dataset(tmp, chunks=10) as ds:
             first_task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertIsInstance(first_task[-1], type(Lock()))
         with open_mfdataset(tmp) as ds:
             first_task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertIsInstance(first_task[-1], type(Lock()))
         with open_mfdataset(tmp, engine='scipy') as ds:
             first_task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertNotIsInstance(first_task[-1], type(Lock()))
开发者ID:ashang,项目名称:xarray,代码行数:13,代码来源:test_backends.py


示例8: test_open_and_do_math

 def test_open_and_do_math(self):
     """Arithmetic on a lazily opened dataset must match the original."""
     source = Dataset({'foo': ('x', np.random.randn(10))})
     with create_tmp_file() as tmp:
         source.to_netcdf(tmp)
         with open_mfdataset(tmp) as ds:
             result = 1.0 * ds
             self.assertDatasetAllClose(source, result)
开发者ID:ashang,项目名称:xarray,代码行数:7,代码来源:test_backends.py


示例9: retrieve

    def retrieve(path, isel='all', lazy=True):
        """Load saved fields data and metadata from *path*.

        Parameters
        ----------
        path : str or Path
            Directory containing ``data.nc`` (or ``data*.nc`` chunks) and
            ``metadata.yml`` (or a legacy ``Treant.*.json`` file).
        isel : 'all', 'last', dict, or any ``isel`` index
            Time selection applied to the data.
        lazy : bool
            If False, load the data eagerly. Note: when a single
            ``data.nc`` is found, lazy is forced to True (original
            behaviour kept).

        Returns
        -------
        FieldsData
        """
        path = Path(path)
        try:
            data = open_dataset(path / "data.nc")
            lazy = True
        except FileNotFoundError:
            data = open_mfdataset(path / "data*.nc",
                                  concat_dim="t").sortby("t")
        try:
            with open(Path(path) / 'metadata.yml', 'r') as yaml_file:
                # safe_load: metadata is plain data; no need to allow
                # arbitrary Python object construction (yaml.load without
                # a Loader is deprecated and unsafe)
                metadata = yaml.safe_load(yaml_file)
        except FileNotFoundError:
            # Ensure retro-compatibility with older version.
            # BUGFIX: Path.glob returns a generator, which is not
            # subscriptable -- take the first match with next()
            with open(next(path.glob("Treant.*.json"))) as f:
                metadata = json.load(f)["categories"]

        if isel == 'last':
            data = data.isel(t=-1)
        elif isel == 'all':
            pass
        elif isinstance(isel, dict):
            data = data.isel(**isel)
        else:
            data = data.isel(t=isel)

        if not lazy:
            return FieldsData(data=data.load(),
                              metadata=AttrDict(**metadata))

        return FieldsData(data=data,
                          metadata=AttrDict(**metadata))
开发者ID:celliern,项目名称:triflow,代码行数:31,代码来源:container.py


示例10: test_variable_map

    def test_variable_map(self):
        """Variables named via a variable map must be renamed to the
        requested names by preprocess_mpas."""
        fileName = str(self.datadir.join('example_jan.nc'))
        varMap = {
            'avgSurfaceTemperature':
                ['time_avg_avgValueWithinOceanRegion_avgSurfaceTemperature',
                 'other_string',
                 'yet_another_string'],
            'daysSinceStartOfSim':
                ['time_avg_daysSinceStartOfSim',
                 'xtime',
                 'something_else'],
            'avgLayerTemperature':
                ['time_avg_avgValueWithinOceanLayerRegion_avgLayerTemperature',
                 'test1',
                 'test2'],
            'Time': [['xtime_start', 'xtime_end'],
                     'time_avg_daysSinceStartOfSim']}

        varList = ['avgSurfaceTemperature', 'avgLayerTemperature',
                   'refBottomDepth', 'daysSinceStartOfSim']

        def preprocess(dataset):
            # preprocess_mpas will use varMap to map the variable names
            # from their values in the file to the desired values in
            # varList
            return mpas_xarray.preprocess_mpas(dataset,
                                               timestr='Time',
                                               onlyvars=varList,
                                               yearoffset=1850,
                                               varmap=varMap)

        ds = xr.open_mfdataset(fileName, preprocess=preprocess)

        # make sure the remapping happened as expected
        self.assertEqual(sorted(ds.data_vars.keys()), sorted(varList))
开发者ID:toddringler,项目名称:MPAS-Analysis,代码行数:34,代码来源:test_mpas_xarray.py


示例11: month_count_concat

def month_count_concat():
    """Concatenate the monthly GRIDSAT count files and write one
    zlib-compressed netCDF aggregate."""
    msg_folder = cnst.GRIDSAT
    fname = 'aggs/gridsat_WA_-65_monthly_count_-40base_15-21UTC_1000km2.nc'
    monthly = xr.open_mfdataset(
        cnst.GRIDSAT + 'gridsat_WA_-40_1000km2_15-21UTC*_monthSum.nc')

    encoding = {'tir': {'complevel': 5, 'zlib': True}}
    monthly.to_netcdf(msg_folder + fname, encoding=encoding)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:7,代码来源:gridsat_postproc.py


示例12: open_cchdo_as_mfdataset

def open_cchdo_as_mfdataset(paths, target_pressure,
                            pressure_coord='pressure',
                            concat_dim='time'):
    """Open CCHDO hydrographic netCDF casts as one combined xarray dataset.

    Each cast is interpolated onto *target_pressure*, given a time
    coordinate when it lacks one, and concatenated along *concat_dim*.

    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form "path/to/my/files/*.nc" or an
        explicit list of files to open.
    target_pressure : arraylike
        Target pressure to which all casts are interpolated
    pressure_coord : str
        Name of the coordinate variable for pressure
    concat_dim : str
        Name of the dimension along which to concatenate casts

    Returns
    -------
    ds : xarray Dataset
    """
    # Build the per-file preprocessing pipeline from its three steps:
    # pressure interpolation, 0-d coordinate renaming, and adding a time
    # coordinate when missing.
    interpolate = functools.partial(interp_coordinate,
                                    interp_coord=pressure_coord,
                                    interp_data=target_pressure)
    rename = functools.partial(rename_0d_coords, new_dim=concat_dim)
    preprocess = compose(interpolate, rename, _maybe_add_time_coord)
    return xr.open_mfdataset(paths, concat_dim=concat_dim,
                             preprocess=preprocess)
开发者ID:rabernat,项目名称:ctd2xray,代码行数:34,代码来源:cchdo.py


示例13: _load_data_from_disk

def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
                         data_vars='minimal', coords='minimal',
                         grid_attrs=None, **kwargs):
    """Load a Dataset from a list or glob-string of files.

    The per-file datasets are concatenated along time, and grid
    attributes are renamed to their aospy internal names.

    Parameters
    ----------
    file_set : list or str
        List of paths to files or glob-string
    preprocess_func : function (optional)
        Custom function to call before applying any aospy logic
        to the loaded dataset
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to ``xr.open_mfdataset``
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    grid_attrs : dict
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.

    Returns
    -------
    Dataset
    """
    apply_preload_user_commands(file_set)
    # wrap the user's preprocess function with the grid-attribute renaming
    preprocess = _preprocess_and_rename_grid_attrs(preprocess_func,
                                                   grid_attrs, **kwargs)
    return xr.open_mfdataset(file_set,
                             preprocess=preprocess,
                             concat_dim=TIME_STR,
                             decode_times=False,
                             decode_coords=False,
                             mask_and_scale=True,
                             data_vars=data_vars,
                             coords=coords)
开发者ID:spencerahill,项目名称:aospy,代码行数:35,代码来源:data_loader.py


示例14: saveMonthly18

def saveMonthly18():
    """Build monthly means of the 18 UTC GRIDSAT files.

    Values outside [-110, -40] are masked before averaging; the result is
    written next to the input files.
    """
    msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/z18_panAfrica/'

    da = xr.open_mfdataset(msg_folder+'gridsat_WA_*18UTC.nc')
    in_range = (da <= -40) & (da >= -110)
    da = da.where(in_range)
    # NOTE(review): old-style resample signature, deprecated in modern xarray
    da = da.resample('m', dim='time', how='mean')
    da.to_netcdf(msg_folder+'gridsat_monthly_18UTC.nc')
开发者ID:cornkle,项目名称:proj_CEH,代码行数:7,代码来源:saveGridsat_panAf.py


示例15: data

 def data(self):
     """Return the stored fields: lazily from disk when a path is set,
     otherwise concatenated from the in-memory cache; None on OS errors."""
     try:
         if self.path:
             return open_mfdataset(self.path / "data*.nc")
         return self._concat_fields(self._cached_data)
     except OSError:
         # unreadable or missing files: report "no data" instead of raising
         return None
开发者ID:celliern,项目名称:triflow,代码行数:7,代码来源:container.py


示例16: main

def main(files, out):
    """Create ADCP velocity pcolor plots for one or more OOI datasets.

    Parameters
    ----------
    files : str
        URL/path to an .nc/.ncml file, or the path to a text file
        containing .nc/.ncml links (a # at the front skips a link).
    out : str
        Directory in which plots are saved.
    """
    fname, ext = os.path.splitext(files)
    # BUGFIX: the original used `ext in '.nc'` / `ext in '.ncml'`, i.e.
    # substring containment, which also matched '', '.', '.n', 'n', 'c';
    # compare against the exact extensions instead (both branches built
    # the same single-element list, so they are merged)
    if ext in ('.nc', '.ncml'):
        list_files = [files]
    else:
        list_files = read_file(files)

    stream_vars = pf.load_variable_dict(var='eng')  # load engineering variables

    # the engine that xarray uses can be changed as specified here
    # http://xarray.pydata.org/en/stable/generated/xarray.open_dataset.html#xarray.open_dataset
    with xr.open_mfdataset(list_files, engine='netcdf4') as ds_disk:

        # change dimensions from 'obs' to 'time'
        ds_disk = ds_disk.swap_dims({'obs': 'time'})
        title_pre = mk_str(ds_disk.attrs, 't')  # figure title prefix
        save_pre = mk_str(ds_disk.attrs, 's')  # file-name prefix
        save_dir = os.path.join(out, ds_disk.subsite, ds_disk.node,
                                ds_disk.stream, 'pcolor')
        cf.create_dir(save_dir)

        # NOTE(review): removed unused locals (ds_variables, stream) and
        # large blocks of commented-out exploratory code from the original
        bins = ds_disk['bin_depths']
        north = ds_disk['northward_seawater_velocity']
        east = ds_disk['eastward_seawater_velocity']

        # package each series as {data, info} dicts as pf.adcp expects;
        # 2-D fields are transposed so depth runs along the first axis
        time = dict(data=ds_disk['time'].data,
                    info=dict(label=ds_disk['time'].standard_name, units='GMT'))
        bins = dict(data=bins.data.T,
                    info=dict(label=bins.long_name, units=bins.units))
        north = dict(data=north.data.T,
                     info=dict(label=north.long_name, units=north.units))
        east = dict(data=east.data.T,
                    info=dict(label=east.long_name, units=east.units))

        sname = save_pre + 'ADCP'
        title = title_pre
        fig, axs = pf.adcp(time, bins, north, east, title)
        pf.resize(width=12, height=8.5)  # Resize figure
        pf.save_fig(save_dir, sname, res=250)  # Save figure
        plt.close('all')
开发者ID:leilabbb,项目名称:plot-nc-ooi,代码行数:59,代码来源:plot_adcp.py


示例17: test_open_mfdataset

    def test_open_mfdataset(self):
        """open_mfdataset must combine two files into lazy dask arrays,
        honour an explicit chunks argument, and raise IOError when the
        glob matches nothing."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2]) as combined:
                    self.assertIsInstance(combined.foo.variable.data,
                                          da.Array)
                    self.assertEqual(combined.foo.variable.data.chunks,
                                     ((5, 5),))
                    self.assertDatasetAllClose(original, combined)
                with open_mfdataset([tmp1, tmp2],
                                    chunks={'x': 3}) as combined:
                    self.assertEqual(combined.foo.variable.data.chunks,
                                     ((3, 2, 3, 2),))

        with self.assertRaisesRegexp(IOError, 'no files to open'):
            open_mfdataset('foo-bar-baz-*.nc')
开发者ID:ashang,项目名称:xarray,代码行数:17,代码来源:test_backends.py


示例18: read_var_in_memory

def read_var_in_memory(dir, common_suffix="daily.nc", varname="lake_ice_fraction"):
    """Read *varname* from all files in *dir* matching ``*common_suffix``
    and return the variable fully loaded into memory.

    :param dir: directory containing the netCDF files
    :param common_suffix: filename suffix shared by the files to read
    :param varname: name of the variable to extract
    """
    pattern = f"{dir}/*{common_suffix}"
    with xarray.open_mfdataset(pattern) as ds:
        return ds[varname].load()
开发者ID:guziy,项目名称:RPN,代码行数:8,代码来源:cc_lake_ice_fraction.py


示例19: test_save_mfdataset_roundtrip

 def test_save_mfdataset_roundtrip(self):
     """save_mfdataset followed by open_mfdataset must round-trip the
     dataset exactly."""
     original = Dataset({'foo': ('x', np.random.randn(10))})
     halves = [original.isel(x=slice(5)),
               original.isel(x=slice(5, 10))]
     with create_tmp_file() as tmp1:
         with create_tmp_file() as tmp2:
             save_mfdataset(halves, [tmp1, tmp2])
             with open_mfdataset([tmp1, tmp2]) as roundtripped:
                 self.assertDatasetIdentical(roundtripped, original)
开发者ID:ashang,项目名称:xarray,代码行数:9,代码来源:test_backends.py


示例20: test_load_mpas_xarray_timeSeriesStats_datasets

def test_load_mpas_xarray_timeSeriesStats_datasets(path): #{{{
    """Plot the monthly ice-area curve produced by the timeSeriesStats
    preprocessor against the one from the plain preprocessor so the time
    handling of the two can be compared visually."""
    ds = xr.open_mfdataset(path, preprocess=preprocess_mpas_timeSeriesStats)
    ds = remove_repeated_time_index(ds)
    ds2 = xr.open_mfdataset(path, preprocess=preprocess_mpas)
    ds2 = remove_repeated_time_index(ds2)

    # make a simple plot from the data
    def plot_data(dataset):
        var = dataset["timeSeriesStatsMonthly_avg_iceAreaCell_1"]
        return var.where(var > 0).mean('nCells').plot()

    for dataset in (ds, ds2):
        plot_data(dataset)
    plt.title("Curve centered around right times (b) \n "+\
              "Curve shifted towards end of avg period (g)")
    plt.show()

    return #}}}
开发者ID:pwolfram,项目名称:MPAS-Analysis,代码行数:18,代码来源:mpas_xarray.py



注:本文中的xarray.open_mfdataset函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python xarray.DataArray类代码示例发布时间:2022-05-26
下一篇:
Python xarray.open_dataset函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap