
Python xray.concat Function Code Examples


This article collects typical usage examples of the Python xray.concat function. If you have been wondering what exactly concat does, how to call it, or what real uses of it look like, the curated code examples below should help.



Twenty code examples of the concat function follow, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
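For orientation before the examples: xray is the earlier name of the package now called xarray, and concat joins a list of DataArray or Dataset objects along an existing or new dimension. A minimal sketch (the arrays here are invented for illustration):

import numpy as np
import xray  # the package was later renamed xarray; the concat call is unchanged

a = xray.DataArray(np.arange(3), dims=['x'])
b = xray.DataArray(np.arange(3, 6), dims=['x'])
print(xray.concat([a, b], dim='x').values)   # [0 1 2 3 4 5] -- joined along 'x'

# Passing a dimension name not present in the inputs stacks them instead:
stacked = xray.concat([a, b], dim='run')
print(stacked.shape)                         # (2, 3)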

Example 1: wrapyear

def wrapyear(data, data_prev, data_next, daymin, daymax, year=None):
    """Wrap daily data from previous and next years for extended day ranges.
    """
    daynm = atm.get_coord(data, 'day', 'name')

    def leap_adjust(data, year):
        data = atm.squeeze(data)
        ndays = 365
        if year is not None and atm.isleap(year):
            ndays += 1
        else:
            # Remove NaN for day 366 in non-leap year
            data = atm.subset(data, {'day' : (1, ndays)})
        return data, ndays

    data, ndays = leap_adjust(data, year)
    if data_prev is not None:
        data_prev, ndays_prev = leap_adjust(data_prev, year - 1)
        data_prev[daynm] = data_prev[daynm] - ndays_prev
        data_out = xray.concat([data_prev, data], dim=daynm)
    else:
        data_out = data
    if data_next is not None:
        data_next, _ = leap_adjust(data_next, year + 1)
        data_next[daynm] = data_next[daynm] + ndays
        data_out = xray.concat([data_out, data_next], dim=daynm)
    data_out = atm.subset(data_out, {daynm : (daymin, daymax)})

    return data_out
Developer: jenfly | Project: monsoon-onset | Lines: 29 | Source: utils.py
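A hypothetical call (the variable names data_2000, data_1999, data_2001 are invented): assuming daily DataArrays with a 'day' coordinate and the atm helper module used above, this wraps year 2000 with the surrounding years so that day ranges extending past the calendar year can be subset:

extended = wrapyear(data_2000, data_1999, data_2001,
                    daymin=-30, daymax=400, year=2000)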


Example 2: test_concat_size0

    def test_concat_size0(self):
        data = create_test_data()
        split_data = [data.isel(dim1=slice(0, 0)), data]
        actual = concat(split_data, 'dim1')
        self.assertDatasetIdentical(data, actual)

        actual = concat(split_data[::-1], 'dim1')
        self.assertDatasetIdentical(data, actual)
Developer: psederberg | Project: xray | Lines: 8 | Source: test_combine.py


Example 3: test_concat_constant_index

    def test_concat_constant_index(self):
        # GH425
        ds1 = Dataset({'foo': 1.5}, {'y': 1})
        ds2 = Dataset({'foo': 2.5}, {'y': 1})
        expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
        for mode in ['different', 'all', ['foo']]:
            actual = concat([ds1, ds2], 'y', data_vars=mode)
            self.assertDatasetIdentical(expected, actual)
        with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
            concat([ds1, ds2], 'y', data_vars='minimal')
Developer: psederberg | Project: xray | Lines: 10 | Source: test_combine.py


Example 4: test_concat

    def test_concat(self):
        # TODO: simplify and split this test case

        # drop the third dimension to keep things relatively understandable
        data = create_test_data().drop('dim3')

        split_data = [data.isel(dim1=slice(3)),
                      data.isel(dim1=slice(3, None))]
        self.assertDatasetIdentical(data, concat(split_data, 'dim1'))

        def rectify_dim_order(dataset):
            # return a new dataset with all variable dimensions transposed into
            # the order in which they are found in `data`
            return Dataset(dict((k, v.transpose(*data[k].dims))
                                for k, v in iteritems(dataset.data_vars)),
                           dataset.coords, attrs=dataset.attrs)

        for dim in ['dim1', 'dim2']:
            datasets = [g for _, g in data.groupby(dim, squeeze=False)]
            self.assertDatasetIdentical(data, concat(datasets, dim))
            self.assertDatasetIdentical(
                data, concat(datasets, data[dim]))
            self.assertDatasetIdentical(
                data, concat(datasets, data[dim], coords='minimal'))

            datasets = [g for _, g in data.groupby(dim, squeeze=True)]
            concat_over = [k for k, v in iteritems(data.coords)
                           if dim in v.dims and k != dim]
            actual = concat(datasets, data[dim], coords=concat_over)
            self.assertDatasetIdentical(data, rectify_dim_order(actual))

            actual = concat(datasets, data[dim], coords='different')
            self.assertDatasetIdentical(data, rectify_dim_order(actual))

        # make sure the coords argument behaves as expected
        data.coords['extra'] = ('dim4', np.arange(3))
        for dim in ['dim1', 'dim2']:
            datasets = [g for _, g in data.groupby(dim, squeeze=True)]
            actual = concat(datasets, data[dim], coords='all')
            expected = np.array([data['extra'].values
                                 for _ in range(data.dims[dim])])
            self.assertArrayEqual(actual['extra'].values, expected)

            actual = concat(datasets, data[dim], coords='different')
            self.assertDataArrayEqual(data['extra'], actual['extra'])
            actual = concat(datasets, data[dim], coords='minimal')
            self.assertDataArrayEqual(data['extra'], actual['extra'])

        # verify that the dim argument takes precedence over
        # concatenating dataset variables of the same name
        dim = (2 * data['dim1']).rename('dim1')
        datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
        expected = data.copy()
        expected['dim1'] = dim
        self.assertDatasetIdentical(expected, concat(datasets, dim))
Developer: psederberg | Project: xray | Lines: 55 | Source: test_combine.py


Example 5: test_concat_coords

    def test_concat_coords(self):
        data = Dataset({'foo': ('x', np.random.randn(10))})
        expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
        objs = [data.isel(x=slice(5)).assign_coords(c=0),
                data.isel(x=slice(5, None)).assign_coords(c=1)]
        for coords in ['different', 'all', ['c']]:
            actual = concat(objs, dim='x', coords=coords)
            self.assertDatasetIdentical(expected, actual)
        for coords in ['minimal', []]:
            with self.assertRaisesRegexp(ValueError, 'not equal across'):
                concat(objs, dim='x', coords=coords)
Developer: psederberg | Project: xray | Lines: 11 | Source: test_combine.py


Example 6: test_concat_do_not_promote

    def test_concat_do_not_promote(self):
        # GH438
        objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
                Dataset({'y': ('t', [2])}, {'x': 1})]
        expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
        actual = concat(objs, 't')
        self.assertDatasetIdentical(expected, actual)

        objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
                Dataset({'y': ('t', [2])}, {'x': 2})]
        with self.assertRaises(ValueError):
            concat(objs, 't', coords='minimal')
Developer: psederberg | Project: xray | Lines: 12 | Source: test_combine.py


Example 7: cen_diff

    def cen_diff(cls, arr, dim, spacing=1, is_coord=False, do_edges_one_sided=False):
        """Centered differencing of the DataArray or Dataset.

        :param arr: Data to be center-differenced.
        :type arr: `xray.DataArray` or `xray.Dataset`
        :param str dim: Dimension over which to perform the differencing.
        :param int spacing: How many gridpoints over to use.  Size of resulting
                            array depends on this value.
        :param do_edges_one_sided: Whether or not to fill in the edge cells
                                   that don't have the needed neighbor cells
                                   for the stencil.  If `True`, use one-sided
                                   differencing with the same order of accuracy,
                                   and the output array has the same shape as
                                   `arr`.

                                   If `False`, the output array is reduced along
                                   the computed axis by `2 * spacing` cells.
        """
        if spacing < 1:
            raise ValueError("Centered differencing cannot have spacing < 1")
        left = arr.isel(**{dim: slice(0, -spacing)})
        right = arr.isel(**{dim: slice(spacing, None)})
        # Centered differencing = sum of intermediate forward differences
        diff = cls.fwd_diff1(right, dim, is_coord=is_coord) + cls.bwd_diff1(left, dim, is_coord=is_coord)
        if do_edges_one_sided:
            left = arr.isel(**{dim: slice(0, 2)})
            right = arr.isel(**{dim: slice(-2, None)})
            diff_left = cls.fwd_diff1(left, dim, is_coord=is_coord)
            diff_right = cls.bwd_diff1(right, dim, is_coord=is_coord)
            diff = xray.concat([diff_left, diff, diff_right], dim=dim)
        return diff
Developer: spencerahill | Project: infinite-diff | Lines: 31 | Source: finite_diff.py
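The comment in the code above ("centered differencing = sum of intermediate forward differences") can be checked with plain numpy: for spacing 1, arr[i+1] - arr[i-1] equals the forward difference at i plus the forward difference at i-1. A quick sanity check on f(x) = x**2 (values invented for illustration):

import numpy as np

arr = np.array([0., 1., 4., 9., 16., 25.])  # x**2 sampled at x = 0..5
fwd = arr[1:] - arr[:-1]                    # forward differences f[i+1] - f[i]
centered = fwd[1:] + fwd[:-1]               # f[i+1] - f[i-1] at interior x = 1..4
print(centered)                             # [ 4.  8. 12. 16.], i.e. 4x over the 2-cell stride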


Example 8: wrapyear_all

def wrapyear_all(data, daymin, daymax):
    """Wrap daily data to extended ranges over each year in yearly data."""

    def extract_year(data, year, years):
        if year in years:
            data_out = atm.subset(data, {'year' : (year, year)})
        else:
            data_out = None
        return data_out

    daynm = atm.get_coord(data, 'day', 'name')
    days = np.arange(daymin, daymax + 1)
    days = xray.DataArray(days, name=daynm, coords={daynm : days})
    years = atm.get_coord(data, 'year')
    yearnm = atm.get_coord(data, 'year', 'name')
    for y, year in enumerate(years):
        year_prev, year_next = year - 1, year + 1
        var = extract_year(data, year, years)
        var_prev = extract_year(data, year_prev, years)
        var_next = extract_year(data, year_next, years)
        var_out = wrapyear(var, var_prev, var_next, daymin, daymax, year)
        var_out = atm.expand_dims(var_out, 'year', year, axis=0)
        var_out = var_out.reindex_like(days)
        if y == 0:
            data_out = var_out
        else:
            data_out = xray.concat([data_out, var_out], dim=yearnm)

    return data_out
Developer: jenfly | Project: monsoon-onset | Lines: 29 | Source: utils.py
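A design note: calling xray.concat inside the loop rebuilds the combined array every iteration. A sketch of an equivalent pattern, under the same assumptions about atm and the inputs, collects the per-year pieces first and concatenates once:

pieces = []
for year in years:
    var_out = wrapyear(extract_year(data, year, years),
                       extract_year(data, year - 1, years),
                       extract_year(data, year + 1, years),
                       daymin, daymax, year)
    var_out = atm.expand_dims(var_out, 'year', year, axis=0)
    pieces.append(var_out.reindex_like(days))
data_out = xray.concat(pieces, dim=yearnm)  # one concat instead of N-1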


Example 9: concat_to_nc

def concat_to_nc( filelist, output_filename, dim='time', begin_time=None, end_time=None, nc_format='NETCDF4', **kwargs ):
	'''
	Take a list of consecutive netcdf files (made for CMIP5 data) and stack them into a
	single larger netcdf file.  This was necessary to overcome some bugginess in how
	MFDataset deals with different calendar units across files.  This is
	technically valid CF-Compliant metadata, but is tricky to work with.  This hack allows
	us to get around some of this unpredictable behavior.

	PARAMETERS:
	-----------
	filelist = [list] list of string file paths to the sorted netcdf files to stack together
	output_filename = [str] path to and name of the output file to be generated (.nc extension)
	dim = [str] dimension to stack on -- default is 'time'
	begin_time = [str] PANDAS style datetime string syntax -- used in xray
	end_time = [str] PANDAS style datetime string syntax -- used in xray
	nc_format = [str] output NetCDF format desired. valid strings are:
					'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'
					default is 'NETCDF4'
	**kwargs -- potential future arguments or overloaded args to pass through (none implemented)

	RETURNS:
	--------

	output_filename as string, with the important side-effect of writing data to disk

	'''
	import os
	import xray
	with xray.concat([ xray.open_dataset( i ).load() for i in filelist ], dim ) as ds:
		# time slicer condition
		if begin_time is not None and end_time is not None:
			ds = ds.loc[ { dim:slice( begin_time, end_time ) } ]
		if os.path.exists( output_filename ):
			os.remove( output_filename )
		ds.to_netcdf( output_filename, mode='w', format=nc_format )
	return output_filename
Developer: EarthScientist | Project: alfresco_inputs | Lines: 35 | Source: hur_ar5_model_data_preprocess.py
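A hypothetical invocation (the paths below are invented; any time-sorted list of NetCDF files works):

import glob

files = sorted(glob.glob('/data/cmip5/hur_Amon_GFDL-CM3_*.nc'))
concat_to_nc(files, '/data/cmip5/hur_Amon_GFDL-CM3_1950-2005.nc', dim='time',
             begin_time='1950-01-01', end_time='2005-12-31')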


Example 10: compile_datasets

def compile_datasets(datasets):
    """
    Given a list of datasets. Possibly containing None. Return a single
    dataset with unique indexes and sorted by the 'time' parameter
    :param datasets: :return:
    """
    # filter out the Nones; wrap in list() so len() and .sort() work on Python 3
    datasets = list(filter(None, datasets))
    if len(datasets) == 0:
        return None
    datasets.sort(key=lambda val: val['time'].values[0])
    # now determine if they are in order or not..
    start = 0
    end = 0
    idx = 0
    for ds in datasets:
        ns = ds['time'].min()
        ne = ds['time'].max()
        # Determine if the max and the min are all in order
        start = ns
        end = ne
        new_index = [i for i in range(idx, idx + len(ds['index']))]
        ds['index'] = new_index
        idx = new_index[-1] + 1
    dataset = xray.concat(datasets, dim='index')
    sorted_idx = dataset.time.argsort()
    dataset = dataset.reindex({'index': sorted_idx})
    return dataset
Developer: r-mbruckner | Project: stream_engine | Lines: 28 | Source: common.py


Example 11: read_data

# imports assumed by this snippet; given the old-style resample call below,
# `xr` is presumably `import xray as xr` (or an early xarray release)
import os
import xray as xr

def read_data(data_dir, lat, lon, resample=None):
    files = sorted([os.path.join(data_dir, f) for f in os.listdir(data_dir)])
    dss = [xr.open_dataset(f).sel(lat=lat, lon=lon, method='nearest') for f in files]
    ds = xr.concat([dr.load() for dr in dss], 'time')
    if resample is not None:
        ds = ds.resample(resample, 'time')
    return ds
Developer: tjvandal | Project: soil-moisture-prediction | Lines: 7 | Source: create_dataset.py


Example 12: test_concat

    def test_concat(self):
        self.ds['bar'] = Variable(['x', 'y'], np.random.randn(10, 20))
        foo = self.ds['foo']
        bar = self.ds['bar']
        # from dataset array:
        expected = DataArray(np.array([foo.values, bar.values]),
                             dims=['w', 'x', 'y'])
        actual = concat([foo, bar], 'w')
        self.assertDataArrayEqual(expected, actual)
        # from iteration:
        grouped = [g for _, g in foo.groupby('x')]
        stacked = concat(grouped, self.ds['x'])
        self.assertDataArrayIdentical(foo, stacked)

        with self.assertRaisesRegexp(ValueError, 'not identical'):
            concat([foo, bar], compat='identical')
Developer: josephwinston | Project: xray | Lines: 16 | Source: test_dataarray.py


Example 13: fetch_full_san_data

def fetch_full_san_data(stream_key, time_range, location_metadata=None):
    """
    Given a time range and stream key, generate all data in the interval using data
    from the SAN.
    :param stream_key:
    :param time_range:
    :return:
    """
    if location_metadata is None:
        location_metadata = get_san_location_metadata(stream_key, time_range)
    # get which bins we can gather data from
    ref_des_dir, dir_string = get_SAN_directories(stream_key, split=True)
    if not os.path.exists(ref_des_dir):
        log.warning("Reference Designator does not exist in offloaded DataSAN")
        return None
    data = []
    next_index = 0
    for time_bin in location_metadata.bin_list:
        direct = dir_string.format(time_bin)
        if os.path.exists(direct):
            # get data from all of the deployments
            deployments = os.listdir(direct)
            for deployment in deployments:
                full_path = os.path.join(direct, deployment)
                if os.path.isdir(full_path):
                    new_data = get_deployment_data(full_path, stream_key.stream_name, -1, time_range,
                                                   index_start=next_index)
                    if new_data is not None:
                        data.append(new_data)
                        # Keep track of indexes so they are unique in the final dataset
                        next_index += len(new_data['index'])
    if len(data) == 0:
        return None
    return xray.concat(data, dim='index')
Developer: r-mbruckner | Project: stream_engine | Lines: 34 | Source: san.py


Example 14: combinevars

    def combinevars(ds_in, dat_vars, new_dim_name='new_dim', combinevarname='new_var'):
        ds_out = xray.concat([ds_in[dv] for dv in dat_vars], dim='new_dim')
        ds_out = ds_out.rename({'new_dim': new_dim_name})
        ds_out.coords[new_dim_name] = dat_vars
        ds_out.name = combinevarname

        return ds_out
Developer: Mountain-Hydrology-Research-Group | Project: MesoPy | Lines: 8 | Source: Example_download_MesoWest_data_to_Netcdf.py
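A hypothetical call, assuming ds is an xray.Dataset holding station variables 'temp' and 'dewpoint' with matching dimensions (the names here are invented):

obs = combinevars(ds, ['temp', 'dewpoint'],
                  new_dim_name='varname', combinevarname='obs')
# obs.coords['varname'] is now ['temp', 'dewpoint']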


Example 15: read_all

    def read_all(self,channels, start_offset,  end_offset, buffer):
        evs = self.events

        raw_bin_wrappers, original_eeg_files = self.__create_bin_readers()

        # we need to create rawbinwrappers first to figure out sample rate before calling __compute_time_series_length()
        time_series_length = self.__compute_time_series_length()

        time_series_data = np.empty((len(channels), len(evs), time_series_length),
                             dtype=np.float64)*np.nan  # np.float is removed in modern numpy

        events = []

        newdat_list = []

        # for s,src in enumerate(usources):
        for s,(src,eegfile) in enumerate(zip(raw_bin_wrappers,original_eeg_files)):
            ind = np.atleast_1d( evs.eegfile == eegfile)

            if len(ind) == 1:
                events.append(evs[0])
            else:
                events.append(evs[ind])

            # print event_offsets
            #print "Loading %d events from %s" % (ind.sum(),src)
            # get the timeseries for those events
            newdat = src.get_event_data_xray_simple(channels=channels,events=events,
                                                    start_offset=start_offset,end_offset=end_offset,buffer=buffer)

            newdat_list.append(newdat)


        start_extend_time = time.time()
        #new code
        eventdata = xray.concat(newdat_list,dim='events')
        end_extend_time = time.time()


        # concatenate (must eventually check that dims match)
        # ORIGINAL CODE
        tdim = eventdata['time']
        cdim = eventdata['channels']
        # srate = eventdata.samplerate
        srate = eventdata.attrs['samplerate']

        eventdata_xray = eventdata
        # eventdata_xray = xray.DataArray(np.squeeze(eventdata.values), coords=[cdim,tdim], dims=['channels','time'])
        # eventdata_xray.attrs['samplerate'] = eventdata.attrs['samplerate']


        if not self.keep_buffer:
            # trimming buffer data samples
            number_of_buffer_samples =  self.get_number_of_samples_for_interval(self.buffer_time)
            if number_of_buffer_samples > 0:
                eventdata_xray = eventdata_xray[:,:,number_of_buffer_samples:-number_of_buffer_samples]

        return eventdata_xray
Developer: isaac-ped | Project: ptsa_new | Lines: 58 | Source: TimeSeriesEEGReader.py


Example 16: filter

    def filter(self):

        event_data_dict = OrderedDict()

        for eegfile_name, data in self.data_dict.items():

            evs = self.events[self.events.eegfile == eegfile_name]

            samplerate = data.attrs['samplerate']

            # used in constructing time_axis
            offset_time_array = data['time'].values['eegoffset']

            event_chunk_size, start_point_shift = self.get_event_chunk_size_and_start_point_shift(ev=evs[0],
                                                                                                  samplerate=samplerate,
                                                                                                  offset_time_array=offset_time_array)

            event_time_axis = np.linspace(-self.buffer + self.time_shift,
                                          self.event_duration + self.buffer + self.time_shift,
                                          event_chunk_size)

            data_list = []

            shape = None

            for i, ev in enumerate(evs):
                # print ev.eegoffset
                start_chop_pos = np.where(offset_time_array >= ev.eegoffset)[0][0]
                start_chop_pos += start_point_shift
                selector_array = np.arange(start=start_chop_pos, stop=start_chop_pos + event_chunk_size)

                # ev_array = eeg_session_data[:,:,selector_array] # ORIG CODE

                chopped_data_array = data.isel(time=selector_array)

                chopped_data_array['time'] = event_time_axis
                chopped_data_array['events'] = [i]

                data_list.append(chopped_data_array)

                # print i

            ev_concat_data = xray.concat(data_list, dim='events')

            # replacing simple events axis (consecutive integers) with recarray of events
            ev_concat_data['events'] = evs

            ev_concat_data.attrs['samplerate'] = samplerate
            ev_concat_data.attrs['time_shift'] = self.time_shift
            ev_concat_data.attrs['event_duration'] = self.event_duration
            ev_concat_data.attrs['buffer'] = self.buffer

            event_data_dict[eegfile_name] = TimeSeriesX(ev_concat_data)

            break  # REMOVE THIS

        return event_data_dict
Developer: isaac-ped | Project: ptsa_new | Lines: 57 | Source: EventDataChopper.py


Example 17: test_concat_lazy

    def test_concat_lazy(self):
        import dask.array as da

        arrays = [DataArray(
            da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
            dims=['x', 'y']) for _ in range(2)]
        # should not raise
        combined = concat(arrays, dim='z')
        self.assertEqual(combined.shape, (2, 3, 3))
        self.assertEqual(combined.dims, ('z', 'x', 'y'))
Developer: psederberg | Project: xray | Lines: 10 | Source: test_combine.py


Example 18: ssn_average

def ssn_average(var, onset, retreat, season):
    years = var['year'].values
    for y, year in enumerate(years):
        days = season_days(season, year, onset.values[y], retreat.values[y])
        var_yr = atm.subset(var, {'year' : (year, year)}, squeeze=False)
        var_yr = var_yr.sel(dayrel=days).mean(dim='dayrel')
        if y == 0:
            var_out = var_yr
        else:
            var_out = xray.concat([var_out, var_yr], dim='year')
    return var_out
Developer: jenfly | Project: monsoon-onset | Lines: 11 | Source: save-dailyrel-regression.py


Example 19: test_lazy_array

    def test_lazy_array(self):
        u = self.eager_array
        v = self.lazy_array

        self.assertLazyAndAllClose(u, v)
        self.assertLazyAndAllClose(-u, -v)
        self.assertLazyAndAllClose(u.T, v.T)
        self.assertLazyAndAllClose(u.mean(), v.mean())
        self.assertLazyAndAllClose(1 + u, 1 + v)

        actual = concat([v[:2], v[2:]], "x")
        self.assertLazyAndAllClose(u, actual)
Developer: jjhelmus | Project: xray | Lines: 12 | Source: test_dask.py


Example 20: concat_plevs

def concat_plevs(datadir, year, varnm, plevs, pdim, version):
    pname = 'Height'
    for i, plev in enumerate(plevs):
        filenm = datafile(datadir, varnm, plev, year, version)
        print('Reading ' + filenm)
        with xray.open_dataset(filenm) as ds:
            var_in = ds[varnm].load()
            var_in = atm.expand_dims(var_in, pname, plev, axis=1)
        if i == 0:
            var = var_in
        else:
            var = xray.concat([var, var_in], dim=pname)
    return var
Developer: jenfly | Project: monsoon-onset | Lines: 13 | Source: save-dp.py
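For comparison, the new-dimension bookkeeping can also be pushed into concat itself by passing a coordinate DataArray as the dim argument, much as Example 4 does with data[dim]. A minimal sketch with synthetic slices (shapes and values invented):

import numpy as np
import xray

plevs = [1000, 850, 500]
slices = [xray.DataArray(np.random.randn(4, 5), dims=['lat', 'lon'])
          for _ in plevs]
height = xray.DataArray(plevs, name='Height', dims=['Height'])
var = xray.concat(slices, dim=height)   # dims become ('Height', 'lat', 'lon')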



Note: the xray.concat examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.

