
Python numpy.nanargmin Function Code Examples


This article collects and organizes typical usage examples of Python's numpy.nanargmin function. If you are unsure how nanargmin is used in practice, how to call it, or what it looks like in real code, the curated examples below should help.



The sections below show 20 code examples of the nanargmin function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
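
Before the project-level examples, here is a minimal self-contained sketch (not taken from any of the projects below; the array values are purely illustrative) of what numpy.nanargmin does: it returns the index of the smallest value while ignoring NaN entries, and raises ValueError for an all-NaN slice.

import numpy as np

a = np.array([[4.0, np.nan, 1.5],
              [np.nan, 2.0, 3.0]])

# Index of the smallest non-NaN value in the flattened array.
print(np.nanargmin(a))          # 2 (the 1.5 at flat position 2)

# Per-row index of the smallest non-NaN value.
print(np.nanargmin(a, axis=1))  # [2 1]

# An all-NaN slice raises ValueError instead of returning an index.
try:
    np.nanargmin(np.array([np.nan, np.nan]))
except ValueError as err:
    print(err)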

Example 1: divide_spans

def divide_spans(complete_df):
	'''
	Divide individual variables into useful spans and return an array of transition times.
	'''
	span_variables = list(complete_df.key_measures)
	span_cutoffs = []
	for a_variable in span_variables:
		data_values = complete_df.mloc(measures = [a_variable])[:, 0, :]
		# Middle overall value.
		if a_variable in ['intensity_95', 'intensity_90', 'intensity_80', 'life_texture']:
			cutoff_value = np.ndarray.flatten(data_values)
			cutoff_value = cutoff_value[~np.isnan(cutoff_value)]
			cutoff_value = np.mean(cutoff_value)
			span_side = np.abs(data_values - cutoff_value)
			span_side = np.nanargmin(span_side, axis = 1)
			span_cutoffs.append(span_side)
		# Overall young adult value is 'healthy'.
		elif a_variable in ['bulk_movement']:
			young_adult = np.abs(complete_df.mloc(measures = ['egg_age'])[:, 0, :])
			young_adult = np.nanargmin(young_adult, axis = 1)
			cutoff_value = np.array([data_values[i, young_adult[i]] for i in range(0, young_adult.shape[0])])
			cutoff_value = np.mean(cutoff_value)/2
			span_side = np.abs(data_values - cutoff_value)
			span_side = np.nanargmin(span_side, axis = 1)
			span_cutoffs.append(span_side)
		# Point of own first maximum value.
		elif a_variable in ['total_size', 'cumulative_eggs', 'cumulative_area', 'adjusted_size']:
			span_side = np.nanargmax(data_values, axis = 1)
			span_cutoffs.append(span_side)
		# Raise an exception if I can't find the variable.
		else:
			raise BaseException('Can\'t find ' + a_variable + '.')
	return (span_variables, np.array(span_cutoffs))
Developer: zhang-wb | Project: wormPhysiology | Lines: 33 | Source: computeStatistics.py


Example 2: getPerpContourInd

def getPerpContourInd(skeleton, skel_ind, contour_side1, contour_side2, contour_width):
    # Get the closest point on the contour from a line perpendicular to the skeleton.
    
    # Get the slope of a line perpendicular to the skeleton.
    dR = skeleton[skel_ind+1] - skeleton[skel_ind-1]
    #m = dR[1]/dR[0]; M = -1/m
    a = -dR[0]
    b = +dR[1]
    
    c = b*skeleton[skel_ind,1]-a*skeleton[skel_ind,0]
    
    max_width_squared = np.max(contour_width)**2
    #modified from https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
    #a = M, b = -1
    
    # Make sure we are not selecting a point that gets traversed by a coiled worm.
    dist2cnt1 = np.sum((contour_side1-skeleton[skel_ind])**2,axis=1)
    d1 = np.abs(a*contour_side1[:,0] - b*contour_side1[:,1]+ c)
    d1[dist2cnt1>max_width_squared] = np.nan
    cnt1_ind = np.nanargmin(d1)
    
    dist2cnt2 = np.sum((contour_side2-skeleton[skel_ind])**2,axis=1)
    d2 = np.abs(a*contour_side2[:,0] - b*contour_side2[:,1]+ c)
    d2[dist2cnt2>max_width_squared] = np.nan
    cnt2_ind = np.nanargmin(d2)
    return cnt1_ind, cnt2_ind
Developer: ver228 | Project: Work_In_Progress | Lines: 26 | Source: filter_headtail_ratio.py


Example 3: test_reductions_2D_int

def test_reductions_2D_int():
    x = np.arange(1, 122).reshape((11, 11)).astype('i4')
    a = da.from_array(x, chunks=(4, 4))

    reduction_2d_test(da.sum, a, np.sum, x)
    reduction_2d_test(da.prod, a, np.prod, x)
    reduction_2d_test(da.mean, a, np.mean, x)
    reduction_2d_test(da.var, a, np.var, x, False)  # Difference in dtype algo
    reduction_2d_test(da.std, a, np.std, x, False)  # Difference in dtype algo
    reduction_2d_test(da.min, a, np.min, x, False)
    reduction_2d_test(da.max, a, np.max, x, False)
    reduction_2d_test(da.any, a, np.any, x, False)
    reduction_2d_test(da.all, a, np.all, x, False)

    reduction_2d_test(da.nansum, a, np.nansum, x)
    with ignoring(AttributeError):
        reduction_2d_test(da.nanprod, a, np.nanprod, x)
    reduction_2d_test(da.nanmean, a, np.mean, x)
    reduction_2d_test(da.nanvar, a, np.nanvar, x, False)  # Difference in dtype algo
    reduction_2d_test(da.nanstd, a, np.nanstd, x, False)  # Difference in dtype algo
    reduction_2d_test(da.nanmin, a, np.nanmin, x, False)
    reduction_2d_test(da.nanmax, a, np.nanmax, x, False)

    assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
    assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
    assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
    assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
    assert eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
    assert eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
    assert eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
    assert eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
Developer: BabeNovelty | Project: dask | Lines: 31 | Source: test_reductions.py


Example 4: get_spans

def get_spans(adult_df, a_variable, method = 'young', fraction = 0.5, reverse_direction = False):
	'''
	Get healthspans as defined by young adult function for a_variable.
	'''
	data_values = adult_df.mloc(measures = [a_variable])[:, 0, :]
	if reverse_direction:
		data_values = data_values*-1
	if method == 'young':
		young_adult = np.abs(adult_df.mloc(measures = ['egg_age'])[:, 0, :])
		young_adult = np.nanargmin(young_adult, axis = 1)
		cutoff_value = np.array([data_values[i, young_adult[i]] for i in range(0, young_adult.shape[0])])
		cutoff_value = np.mean(cutoff_value)*fraction
	if method == 'overall_time':
		all_data = np.ndarray.flatten(data_values)
		all_data = all_data[~np.isnan(all_data)]
		cutoff_value = np.percentile(all_data, (1-fraction)*100)

	# Compute the time of crossing the health-gero threshold.
	span_side = data_values - cutoff_value
	health_span_length = np.nanargmin(np.abs(span_side), axis = 1)
	health_span_length = np.array([adult_df.ages[a_time]*24 for a_time in health_span_length])

	# Deal with the special cases of individuals that spend their entire lives in health or gerospan.
	span_list = [a_span[~np.isnan(a_span)] for a_span in span_side]
	only_health = np.array([(a_span > 0).all() for a_span in span_list])
	only_gero = np.array([(a_span < 0).all() for a_span in span_list])
	adultspans = selectData.get_adultspans(adult_df)
	health_span_length[only_health] = adultspans[only_health]
	health_span_length[only_gero] = 0
	return health_span_length
Developer: zhang-wb | Project: wormPhysiology | Lines: 30 | Source: computeStatistics.py


Example 5: test_reductions_1D

def test_reductions_1D(dtype):
    x = np.arange(5).astype(dtype)
    a = da.from_array(x, chunks=(2,))

    reduction_1d_test(da.sum, a, np.sum, x)
    reduction_1d_test(da.prod, a, np.prod, x)
    reduction_1d_test(da.mean, a, np.mean, x)
    reduction_1d_test(da.var, a, np.var, x)
    reduction_1d_test(da.std, a, np.std, x)
    reduction_1d_test(da.min, a, np.min, x, False)
    reduction_1d_test(da.max, a, np.max, x, False)
    reduction_1d_test(da.any, a, np.any, x, False)
    reduction_1d_test(da.all, a, np.all, x, False)

    reduction_1d_test(da.nansum, a, np.nansum, x)
    with ignoring(AttributeError):
        reduction_1d_test(da.nanprod, a, np.nanprod, x)
    reduction_1d_test(da.nanmean, a, np.mean, x)
    reduction_1d_test(da.nanvar, a, np.var, x)
    reduction_1d_test(da.nanstd, a, np.std, x)
    reduction_1d_test(da.nanmin, a, np.nanmin, x, False)
    reduction_1d_test(da.nanmax, a, np.nanmax, x, False)

    assert eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
    assert eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
    assert eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
    assert eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))

    assert eq(da.argmax(a, axis=0, split_every=2), np.argmax(x, axis=0))
    assert eq(da.argmin(a, axis=0, split_every=2), np.argmin(x, axis=0))
    assert eq(da.nanargmax(a, axis=0, split_every=2), np.nanargmax(x, axis=0))
    assert eq(da.nanargmin(a, axis=0, split_every=2), np.nanargmin(x, axis=0))
Developer: bj-wangjia | Project: dask | Lines: 32 | Source: test_reductions.py


Example 6: mouse_drag

    def mouse_drag(self, event):
        '''

        '''

        if event.inaxes == self.ax and event.button == 1:

            # Index of nearest point
            i = np.nanargmin(((event.xdata - self.x) / self.nx) ** 2)
            j = np.nanargmin(((event.ydata - self.y) / self.ny) ** 2)

            if (i == self.last_i) and (j == self.last_j):
                return
            else:
                self.last_i = i
                self.last_j = j

            # Toggle pixel
            if self.aperture[j, i]:
                self.aperture[j, i] = 0
            else:
                self.aperture[j, i] = 1

            # Update the contour
            self.update()
Developer: rodluger | Project: everest | Lines: 25 | Source: standalone.py


Example 7: __store_estimate

 def __store_estimate(self):
     for i_method in range(0, self.n_methods):
         self.estimate[i_method, :]\
             = np.unravel_index(
                 np.nanargmin(self.criterion_result[i_method, :, :]),
                 self.criterion_result[i_method, :, :].shape)
         self.best_actvt[i_method]\
             = self.actvt_result\
             [self.estimate[i_method, 0]][self.estimate[i_method, 1]]
         self.best_base[i_method]\
             = self.base_result\
             [self.estimate[i_method, 0]][self.estimate[i_method, 1]]
         self.best_completion[i_method]\
             = self.completion_result\
             [self.estimate[i_method, 0]][self.estimate[i_method, 1]]
     if self.true_width is not None:
         for i_method in range(0, self.n_methods):
             self.estimate_given_width[i_method]\
                 = np.nanargmin(self.criterion_result[i_method, self.true_width, :])
             self.best_actvt_given_width[i_method]\
                 = self.actvt_result\
                 [self.true_width][self.estimate_given_width[i_method]]
             self.best_base_given_width[i_method]\
                 = self.base_result\
                 [self.true_width][self.estimate_given_width[i_method]]
             self.best_completion_given_width[i_method]\
                 = self.completion_result\
                 [self.true_width][self.estimate_given_width[i_method]]
Developer: atsushi-suzuki | Project: cnmf_mdl | Lines: 28 | Source: cnmf.py


Example 8: _initialize

    def _initialize(self):
        """ Enforce bounds """
        system = self._system

        u = system.vec['u'].array
        du = system.vec['du'].array
        lower = system.vec['lb'].array
        upper = system.vec['ub'].array
        self.alpha = 1.0
        if not numpy.isnan(lower).all():
            lower_const = u + self.alpha*du - lower
            ind = numpy.nanargmin(lower_const)
            if lower_const[ind] < 0:
                self.alpha = (lower[ind] - u[ind]) / du[ind]
        if not numpy.isnan(upper).all():
            upper_const = upper - u - self.alpha*du
            ind = numpy.nanargmin(upper_const)
            if upper_const[ind] < 0:
                self.alpha = (upper[ind] - u[ind]) / du[ind]
        self.info = self.alpha

        norm0 = self._norm()
        if norm0 == 0.0:
            norm0 = 1.0
        system.vec['u'].array[:] += self.alpha * system.vec['du'].array[:]
        norm = self._norm()
        return norm0, norm
Developer: Daiyu506 | Project: CMF | Lines: 27 | Source: framework.py


Example 9: dataindex

def dataindex(x_loc,y_loc,datain_loc):
    x_loc = float(x_loc)
    y_loc = float(y_loc)
    radius = np.sqrt(x_loc**(2.)+y_loc**(2.))
    angle = np.arctan(x_loc/y_loc)
    line_radius = datain_loc[np.nanargmin((np.abs(datain_loc['r']-(radius))), axis=0)]
    radius = line_radius[3]
    line_angle = datain_loc[datain_loc['r']==radius][np.nanargmin((np.abs((datain_loc[datain_loc['r']==radius]['theta'])-(angle))), axis=0)]
    angle = line_angle[4]
    return np.nanargmin((np.abs(datain_loc['r']-(radius)) + np.abs(datain_loc ['theta']-(angle))), axis=0)
Developer: Mylleranton | Project: AccretionDisks | Lines: 10 | Source: particle_trail_forces.py


Example 10: mapmean

def mapmean(tempDF, meta, name = '', option = 0): 
    import cartopy.crs as ccrs
    from cartopy.io.img_tiles import MapQuestOSM
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    #fig  = plt.figure(figsize=(30, 30))
    x = meta['location:Longitude'].values
    y = meta['location:Latitude'].values
    c = tempDF[meta.index].mean()
    marker_size = 350 
    imagery = MapQuestOSM()
    fig = plt.figure(figsize=[15,15])
    ax = plt.axes(projection=imagery.crs)
    
    ax.set_extent(( meta['location:Longitude'].min()-.005, 
                   meta['location:Longitude'].max()+.005 , 
                   meta['location:Latitude'].min()-.005,
                   meta['location:Latitude'].max()+.005))
    ax.add_image(imagery, 14)

    cmap = matplotlib.cm.OrRd
    bounds = np.linspace(round((c.mean()-3)),round((c.mean()+3)),13)
    norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
    plotHandle = ax.scatter(x,y,c = c, s = marker_size, transform=ccrs.Geodetic(), 
                 cmap = cmap,
                 norm = norm)
    
    if option ==0 : 
        cbar1 = plt.colorbar(plotHandle, label = 'Temperature in $^\circ $C')
    else : 
        cbar1 = plt.colorbar(plotHandle, label = option)

    lon = x[np.nanargmax(c)]
    lat = y[np.nanargmax(c)]
    at_x, at_y = ax.projection.transform_point(lon, lat,
                                               src_crs=ccrs.Geodetic())
    plt.annotate(
        '%2.1f'%np.nanmax(c.values), xy=(at_x, at_y), #xytext=(30, 20), textcoords='offset points',
        color='black', backgroundcolor='none', size=22,
        )

    lon = x[np.nanargmin(c)]
    lat = y[np.nanargmin(c)]
    at_x, at_y = ax.projection.transform_point(lon, lat,
                                               src_crs=ccrs.Geodetic())
    plt.annotate(
        '%2.1f'%np.nanmin(c.values), xy=(at_x, at_y), #xytext=(30, 20), textcoords='offset points',
        color='black', size = 22, backgroundcolor='none')

    plt.annotate(
        '$\mu = $ %2.1f, $\sigma = $ %2.1f'%(np.nanmean(c.values), np.nanstd(c.values)), (0.01,0.01), xycoords ='axes fraction', #xytext=(30, 20), textcoords='offset points',
        color='black', size = 22, backgroundcolor='none')
    
    plt.title('Mean Temperature %s'%name)
    filename = './plots/meantempmap%s.eps'%name
    plt.savefig(filename, format = 'eps', dpi = 600)
Developer: gottscott | Project: IntraUrbanTemperatureVariabilityBaltimore | Lines: 55 | Source: ibuttonplots.py


Example 11: closest_real_time

def closest_real_time(complete_df, a_worm, a_time, egg_mode = True):
	'''
	For a_worm at a_time, find the closest real time point.
	'''
	time_split = a_time.split('.')
	hours_time = int(time_split[0])*24 + int(time_split[1])*3
	if not egg_mode:
		time_index = np.nanargmin(np.abs(complete_df.raw[a_worm].loc[:, 'age'] - hours_time))
	else:
		time_index = np.nanargmin(np.abs(complete_df.raw[a_worm].loc[:, 'egg_age'] - hours_time))
	real_time = complete_df.raw[a_worm].index[time_index]
	return real_time
Developer: zhang-wb | Project: wormPhysiology | Lines: 12 | Source: selectData.py


Example 12: test_reductions_2D_nans

def test_reductions_2D_nans():
    # chunks are a mix of some/all/no NaNs
    x = np.full((4, 4), np.nan)
    x[:2, :2] = np.array([[1, 2], [3, 4]])
    x[2, 2] = 5
    x[3, 3] = 6
    a = da.from_array(x, chunks=(2, 2))

    reduction_2d_test(da.sum, a, np.sum, x, False, False)
    reduction_2d_test(da.prod, a, np.prod, x, False, False)
    reduction_2d_test(da.mean, a, np.mean, x, False, False)
    reduction_2d_test(da.var, a, np.var, x, False, False)
    reduction_2d_test(da.std, a, np.std, x, False, False)
    reduction_2d_test(da.min, a, np.min, x, False, False)
    reduction_2d_test(da.max, a, np.max, x, False, False)
    reduction_2d_test(da.any, a, np.any, x, False, False)
    reduction_2d_test(da.all, a, np.all, x, False, False)

    reduction_2d_test(da.nansum, a, np.nansum, x, False, False)
    reduction_2d_test(da.nanprod, a, nanprod, x, False, False)
    reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)
    with pytest.warns(None):  # division by 0 warning
        reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)
    with pytest.warns(None):  # division by 0 warning
        reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)
    with pytest.warns(None):  # all NaN axis warning
        reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)
    with pytest.warns(None):  # all NaN axis warning
        reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)

    assert_eq(da.argmax(a), np.argmax(x))
    assert_eq(da.argmin(a), np.argmin(x))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmax(a), np.nanargmax(x))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmin(a), np.nanargmin(x))
    assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))
    assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))
    assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))
    assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))
    with pytest.warns(None):  # all NaN axis warning
        assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))
Developer: mmngreco | Project: dask | Lines: 48 | Source: test_reductions.py


Example 13: maximum_posterior

    def maximum_posterior(self):
        """Returns the maximum log posterior (minimum negative log posterior)
        from the set of chains, along with the position giving the maximum
        posterior.
        """
        if not self.chains:
            raise Exception("There are no chains in the MCMCSet.")

        max_posterior = np.inf
        max_posterior_position = None
        for chain in self.chains:
            # Make sure the chain is not empty!
            if len(chain.posteriors) > 0:
                chain_max_posterior_index = np.nanargmin(chain.posteriors)
                chain_max_posterior = \
                                chain.posteriors[chain_max_posterior_index]
                if chain_max_posterior < max_posterior:
                    max_posterior = chain_max_posterior
                    max_posterior_position = \
                                chain.positions[chain_max_posterior_index]

        # Check if there are no positions
        if max_posterior_position is None:
            raise NoPositionsException('The maximum posterior could not be determined '
                                       'because there are no accepted positions.')

        return (max_posterior, max_posterior_position)
Developer: LoLab-VU | Project: bayessb | Lines: 27 | Source: multichain.py


Example 14: apply_roi

    def apply_roi(self, roi):
        if not isinstance(roi, PointROI):
            raise NotImplementedError("Only PointROI supported")

        if self._layout is None or self.display_data is None:
            return

        x, y = roi.x, roi.y
        if not roi.defined():
            return

        xs, ys = self._layout[:, ::3]
        parent_ys = self._layout[1, 1::3]

        delt = np.abs(x - xs)
        delt[y > ys] = np.nan
        delt[y < parent_ys] = np.nan

        if np.isfinite(delt).any():
            select = np.nanargmin(delt)
            if self.select_substruct:
                select = self._substructures(select)
            select = np.asarray(select, dtype=np.int)
        else:
            select = np.array([], dtype=np.int)

        state = CategorySubsetState(self.display_data.pixel_component_ids[0],
                                    select)

        EditSubsetMode().update(self.collect, state,
                                focus_data=self.display_data)
Developer: borkin | Project: glue | Lines: 31 | Source: dendro_client.py


Example 15: maximum_likelihood

    def maximum_likelihood(self):
        """Returns the maximum log likelihood (minimum negative log likelihood)
        from the set of chains, along with the position giving the maximum
        likelihood.
        """
        if not self.chains:
            raise Exception("There are no chains in the MCMCSet.")

        max_likelihood = np.inf
        max_likelihood_position = None
        for chain in self.chains:
            # Make sure the chain is not empty!
            if len(chain.likelihoods) > 0:
                chain_max_likelihood_index = np.nanargmin(chain.likelihoods)
                chain_max_likelihood = \
                                chain.likelihoods[chain_max_likelihood_index]
                if chain_max_likelihood < max_likelihood:
                    max_likelihood = chain_max_likelihood
                    max_likelihood_position = \
                                chain.positions[chain_max_likelihood_index]

        # Check if there are no positions
        if max_likelihood_position is None:
            raise NoPositionsException('The maximum likelihood could not be '
                        'determined because there are no accepted positions.')
        return (max_likelihood, max_likelihood_position)
Developer: LoLab-VU | Project: bayessb | Lines: 26 | Source: multichain.py


Example 16: get_closest_inconsistencies

    def get_closest_inconsistencies(self, vector, distance_measure, max_height=numpy.inf, check_constant=False,
                                    constant_prob=1.0):
        num_trees = len(self.trees) if max_height >= len(self.height_levels) else self.height_levels[max_height]

        consistent_indices = ~numpy.isnan(vector)
        if not numpy.any(consistent_indices):
            tree_index = random.randrange(num_trees)
            return self.trees[tree_index][:], self.semantic_array[tree_index]
        consistent_vector = vector[consistent_indices]

        if not numpy.all(numpy.isfinite(consistent_vector)):
            tree_index = random.randrange(num_trees)
            return self.trees[tree_index][:], self.semantic_array[tree_index]
        if is_constant(consistent_vector):
            return [gp.Terminal(consistent_vector[0], False, float)], [consistent_vector[0]] * len(vector)

        dists = distance_measure(self.semantic_array[:num_trees, consistent_indices], consistent_vector, axis=1)
        min_distance_index = numpy.nanargmin(dists)

        if check_constant and random.random() < constant_prob:
            constant = numpy.median(consistent_vector).item()
            constant_distance = distance_measure(consistent_vector, constant)
            if constant_distance < dists[min_distance_index]:
                return [gp.Terminal(constant, False, float)], [constant] * len(vector)

        return self.trees[min_distance_index][:], self.semantic_array[min_distance_index]
Developer: 4sp1r3 | Project: gecco_2016 | Lines: 26 | Source: library.py


Example 17: otsu

def otsu(img):
    # Find within-class variance for each candidate threshold value. Choose the
    # one that minimises it.
    n_pix = np.prod(img.shape)

    threshold_variances = np.inf * np.ones((254,1))
    for i in range(254):
        t = i + 1
        bin_masks = [img < t, img >= t]
        # bin_masks[0] gives the less-than mask.
        # bin_masks[1] gives the geq mask.
        # Use list comprehensions so the results are indexable
        # (map returns a lazy iterator in Python 3).
        vals = [img[mask] for mask in bin_masks]

        N = [np.sum(mask) for mask in bin_masks]
        mu = [np.mean(v) for v in vals]
        sigma = [np.std(v) for v in vals]
        variance = [s ** 2 for s in sigma]
        threshold_variances[i] = \
                (N[0] * variance[0] + N[1] * variance[1]) / n_pix
    
    min_thresh = np.nanargmin(threshold_variances[1:255])
    thresholded = np.copy(img)

    thresholded[img < min_thresh] = 0
    thresholded[img >= min_thresh] = 1
    return thresholded
Developer: shantanu-gupta | Project: ca-kitchen | Lines: 26 | Source: img_utilities.py


Example 18: make_timing_params

    def make_timing_params(self, begin, end, snap_vred=True):

        '''Compute tight parameterized time ranges to include given timings.

        Calculates appropriate time ranges to cover given begin and end timings
        over all GF points in the store. A dict with the following keys is
        returned:

        * ``'tmin'``: time [s], minimum of begin timing over all GF points
        * ``'tmax'``: time [s], maximum of end timing over all GF points
        * ``'vred'``, ``'tmin_vred'``: slope [m/s] and offset [s] of reduction
          velocity [m/s] appropriate to catch begin timing over all GF points
        * ``'tlenmax_vred'``: maximum time length needed to cover all end
          timings, when using linear slope given with (`vred`, `tmin_vred`) as
          start
        '''

        data = []
        for args in self.config.iter_nodes(level=-1):
            tmin = self.t(begin, args)
            tmax = self.t(end, args)
            x = self.config.get_distance(args)
            data.append((x, tmin, tmax))

        xs, tmins, tmaxs = num.array(data, dtype=num.float).T

        i = num.nanargmin(tmins)
        tminmin = tmins[i]
        x_tminmin = xs[i]
        dx = (xs - x_tminmin)
        dx = num.where(dx != 0.0, dx, num.nan)
        s = (tmins - tminmin) / dx
        sred = num.min(num.abs(s[num.isfinite(s)]))

        deltax = self.config.distance_delta

        if snap_vred:
            tdif = sred*deltax
            tdif2 = self.config.deltat * math.floor(tdif / self.config.deltat)
            sred = tdif2/self.config.distance_delta

        tmin_vred = tminmin - sred*x_tminmin
        if snap_vred:
            xe = x_tminmin - int(x_tminmin / deltax) * deltax
            tmin_vred = float(
                self.config.deltat *
                math.floor(tmin_vred / self.config.deltat) - xe * sred)

        tlenmax_vred = num.nanmax(tmax - (tmin_vred + sred*x))
        if sred != 0.0:
            vred = 1.0/sred
        else:
            vred = 0.0

        return dict(
            tmin=tminmin,
            tmax=num.nanmax(tmaxs),
            tmin_vred=tmin_vred,
            tlenmax_vred=tlenmax_vred,
            vred=vred)
Developer: josephwinston | Project: pyrocko | Lines: 60 | Source: store.py


Example 19: decide_migration_migrationlikelihood_woi

	def decide_migration_migrationlikelihood_woi(self):
		migrate_me_maybe = (self.window_overload_index > self.relocation_thresholds)[0]
		if np.sum(migrate_me_maybe) > 0:
			indexes = np.array(np.where(migrate_me_maybe)).tolist()[0] # potential migration sources
			set_of_vms = list()
			for i in indexes:
				partial = (self.location[:, i] == 1).transpose()
				newly_found = np.array(np.where(partial)).tolist()
				set_of_vms += newly_found[0]
			set_of_vms = sorted(set_of_vms)
			pms = [x.get_pm() for x in self.vms]
			pm_volumes = np.array([x.get_volume() for x in self.pms])
			vm_volumes = np.array([x.get_volume_actual() for x in self.vms])
			vm_migrations = np.array([x.get_migrations() for x in self.vms])
			available_volume_per_pm = pm_volumes - self.physical_volume_vector
			available_capacity = [available_volume_per_pm[x.get_pm()] for x in self.vms]
			plan_coefficients = np.array([x.plan.get_coefficient() for x in self.vms])
			minimize_me = -1.0/plan_coefficients * (vm_volumes + available_capacity) + plan_coefficients * vm_migrations
			vm_migrate = np.nanargmin(minimize_me)
			pm_source = self.vms[vm_migrate].get_pm()
			# avoiding to select the source machine as destination by using nan
			available_volume_per_pm[pm_source] = np.nan
			pm_destination = np.nanargmax(available_volume_per_pm)
			self.migrate(vm_migrate, pm_source, pm_destination)
			self.integrated_overload_index[0,pm_source] = 0
			
Developer: alleneben | Project: vm-migration-sim | Lines: 25 | Source: MigrationManager.py


Example 20: find_closest

 def find_closest(self):
     msg = self.current_laser_msg
     rngs = msg.ranges
     idx = np.nanargmin(rngs)
     self.say("idx: " + str(idx))                
     rad = self.index_to_rad(idx)
     return rngs[idx], rad
Developer: wjwwood | Project: pyturtlebot | Lines: 7 | Source: turtlebot.py



Note: The numpy.nanargmin examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.

